// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/slab.h>

#include "dpu_hw_mdss.h"
#include "dpu_hw_blk.h"

/* Serialization lock for dpu_hw_blk_list */
static DEFINE_MUTEX(dpu_hw_blk_lock);

/* List of all hw block objects */
static LIST_HEAD(dpu_hw_blk_list);
19
/**
 * dpu_hw_blk_init - initialize hw block object
 * @hw_blk: pointer to hw block object
 * @type: hw block type - enum dpu_hw_blk_type
 * @id: instance id of the hw block
 * @ops: Pointer to block operations
 */
void dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
		struct dpu_hw_blk_ops *ops)
{
	/* Fill in the descriptor before the block becomes visible to lookups */
	hw_blk->type = type;
	hw_blk->id = id;
	atomic_set(&hw_blk->refcount, 0);
	INIT_LIST_HEAD(&hw_blk->list);

	/* ops is optional; a zeroed ops struct means no start/stop hooks */
	if (ops)
		hw_blk->ops = *ops;

	/* publish on the global list under the serialization lock */
	mutex_lock(&dpu_hw_blk_lock);
	list_add(&hw_blk->list, &dpu_hw_blk_list);
	mutex_unlock(&dpu_hw_blk_lock);
}
41
/**
 * dpu_hw_blk_destroy - destroy hw block object.
 * @hw_blk: pointer to hw block object
 * return: none
 */
dpu_hw_blk_destroy(struct dpu_hw_blk * hw_blk)47 void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk)
48 {
49 if (!hw_blk) {
50 pr_err("invalid parameters\n");
51 return;
52 }
53
54 if (atomic_read(&hw_blk->refcount))
55 pr_err("hw_blk:%d.%d invalid refcount\n", hw_blk->type,
56 hw_blk->id);
57
58 mutex_lock(&dpu_hw_blk_lock);
59 list_del(&hw_blk->list);
60 mutex_unlock(&dpu_hw_blk_lock);
61 }
62
/**
 * dpu_hw_blk_get - get hw_blk from free pool
 * @hw_blk: if specified, increment reference count only
 * @type: if hw_blk is not specified, allocate the next available of this type
 * @id: if specified (>= 0), allocate the given instance of the above type
 * return: pointer to hw block object
 */
struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id)
{
	struct dpu_hw_blk *curr;
	int rc, refcount;

	mutex_lock(&dpu_hw_blk_lock);

	if (!hw_blk) {
		/*
		 * Search for a matching block: exact (type, id) when id >= 0,
		 * otherwise the first unreferenced block of the given type.
		 */
		list_for_each_entry(curr, &dpu_hw_blk_list, list) {
			if ((curr->type != type) ||
					(id >= 0 && curr->id != id) ||
					(id < 0 &&
					atomic_read(&curr->refcount)))
				continue;

			hw_blk = curr;
			break;
		}
	}

	if (!hw_blk) {
		mutex_unlock(&dpu_hw_blk_lock);
		pr_debug("no hw_blk:%d\n", type);
		return NULL;
	}

	/*
	 * Take the reference while still holding dpu_hw_blk_lock: previously
	 * the increment happened after the lock was dropped, so two
	 * concurrent callers could both find the same free (refcount == 0)
	 * block and both claim it.
	 */
	refcount = atomic_inc_return(&hw_blk->refcount);
	mutex_unlock(&dpu_hw_blk_lock);

	/* first user powers the block up; ops.start may fail */
	if (refcount == 1 && hw_blk->ops.start) {
		rc = hw_blk->ops.start(hw_blk);
		if (rc) {
			pr_err("failed to start hw_blk:%d rc:%d\n", type, rc);
			goto error_start;
		}
	}

	pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
			hw_blk->id, refcount);
	return hw_blk;

error_start:
	/* drop the reference taken above; refcount falls back to zero */
	dpu_hw_blk_put(hw_blk);
	return ERR_PTR(rc);
}
113
/**
 * dpu_hw_blk_put - put hw_blk to free pool if decremented refcount is zero
 * @hw_blk: hw block to be freed
 */
dpu_hw_blk_put(struct dpu_hw_blk * hw_blk)119 void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk)
120 {
121 if (!hw_blk) {
122 pr_err("invalid parameters\n");
123 return;
124 }
125
126 pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type, hw_blk->id,
127 atomic_read(&hw_blk->refcount));
128
129 if (!atomic_read(&hw_blk->refcount)) {
130 pr_err("hw_blk:%d.%d invalid put\n", hw_blk->type, hw_blk->id);
131 return;
132 }
133
134 if (atomic_dec_return(&hw_blk->refcount))
135 return;
136
137 if (hw_blk->ops.stop)
138 hw_blk->ops.stop(hw_blk);
139 }
140