/*
 * V4L2 clock service
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <media/v4l2-clk.h>
#include <media/v4l2-subdev.h>

static DEFINE_MUTEX(clk_lock);
static LIST_HEAD(clk_list);

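/*
 * Look up a clock registered for the consumer device named @dev_id,
 * optionally matching the connection id @id.  A NULL id, on either the
 * lookup or the registered side, acts as a wildcard.  Must be called with
 * clk_lock held.
 */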
static struct v4l2_clk *v4l2_clk_find(const char *dev_id, const char *id)
{
	struct v4l2_clk *clk;

	list_for_each_entry(clk, &clk_list, list) {
		if (strcmp(dev_id, clk->dev_id))
			continue;

		if (!id || !clk->id || !strcmp(clk->id, id))
			return clk;
	}

	return ERR_PTR(-ENODEV);
}

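/*
 * v4l2_clk_get - look up and reference a V4L2 clock
 * @dev: consumer device
 * @id: connection id, may be NULL
 *
 * Returns the matching clock with its use count incremented, or an
 * ERR_PTR() if no clock has been registered for dev_name(@dev).
 */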
struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
{
	struct v4l2_clk *clk;

	mutex_lock(&clk_lock);
	clk = v4l2_clk_find(dev_name(dev), id);

	if (!IS_ERR(clk))
		atomic_inc(&clk->use_count);
	mutex_unlock(&clk_lock);

	return clk;
}
EXPORT_SYMBOL(v4l2_clk_get);

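/*
 * v4l2_clk_put - release a reference obtained with v4l2_clk_get()
 * @clk: the clock to release, error pointers are silently ignored
 *
 * The use count is only dropped if the clock is still on the global list,
 * i.e. it has not been unregistered in the meantime.
 */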
void v4l2_clk_put(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;

	if (IS_ERR(clk))
		return;

	mutex_lock(&clk_lock);

	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk)
			atomic_dec(&clk->use_count);

	mutex_unlock(&clk_lock);
}
EXPORT_SYMBOL(v4l2_clk_put);

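/*
 * Pin the module implementing the clock operations, so it cannot be
 * unloaded while the clock is in use.  Fails with -ENODEV if the clock has
 * already been unregistered.  v4l2_clk_unlock_driver() drops the module
 * reference again.
 */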
static int v4l2_clk_lock_driver(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;
	int ret = -ENODEV;

	mutex_lock(&clk_lock);

	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk) {
			ret = !try_module_get(clk->ops->owner);
			if (ret)
				ret = -EFAULT;
			break;
		}

	mutex_unlock(&clk_lock);

	return ret;
}

static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
{
	module_put(clk->ops->owner);
}

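/*
 * v4l2_clk_enable - enable a clock
 * @clk: the clock to enable
 *
 * Pins the provider module until the matching v4l2_clk_disable() and calls
 * the provider's .enable() callback on the first enable.  Further calls
 * only increment the enable count.
 */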
int v4l2_clk_enable(struct v4l2_clk *clk)
{
	int ret = v4l2_clk_lock_driver(clk);

	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);

	if (++clk->enable == 1 && clk->ops->enable) {
		ret = clk->ops->enable(clk);
		if (ret < 0)
			clk->enable--;
	}

	mutex_unlock(&clk->lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_enable);

/*
 * You might Oops if you try to disable a clock that isn't enabled: the
 * provider module is then no longer pinned and could already have been
 * unloaded.  So don't do that.
 */
void v4l2_clk_disable(struct v4l2_clk *clk)
{
	int enable;

	mutex_lock(&clk->lock);

	enable = --clk->enable;
	if (WARN(enable < 0, "Unbalanced %s() on %s:%s!\n", __func__,
		 clk->dev_id, clk->id))
		clk->enable++;
	else if (!enable && clk->ops->disable)
		clk->ops->disable(clk);

	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);
}
EXPORT_SYMBOL(v4l2_clk_disable);

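/*
 * v4l2_clk_get_rate - query the current clock rate
 * @clk: the clock to query
 *
 * Returns the rate reported by the provider's .get_rate() callback, or a
 * negative error code: -ENOSYS if the callback is not implemented.
 */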
unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
{
	int ret = v4l2_clk_lock_driver(clk);

	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->get_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->get_rate(clk);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_get_rate);

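/*
 * v4l2_clk_set_rate - set a new clock rate
 * @clk: the clock to configure
 * @rate: requested rate in Hz
 *
 * Forwards the request to the provider's .set_rate() callback; returns
 * -ENOSYS if the callback is not implemented.
 */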
int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
{
	int ret = v4l2_clk_lock_driver(clk);

	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->set_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->set_rate(clk, rate);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_set_rate);

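/*
 * v4l2_clk_register - register a new V4L2 clock
 * @ops: clock operations, including the owning module
 * @dev_id: name of the consumer device this clock is connected to
 * @id: connection id, may be NULL
 * @priv: provider private data, stored in clk->priv
 *
 * Returns the new clock or an ERR_PTR(): -EINVAL on missing arguments,
 * -ENOMEM on allocation failure, -EEXIST if a matching clock is already
 * registered.
 */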
struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
				   const char *dev_id,
				   const char *id, void *priv)
{
	struct v4l2_clk *clk;
	int ret;

	if (!ops || !dev_id)
		return ERR_PTR(-EINVAL);

	clk = kzalloc(sizeof(struct v4l2_clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->id = kstrdup(id, GFP_KERNEL);
	clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
	if ((id && !clk->id) || !clk->dev_id) {
		ret = -ENOMEM;
		goto ealloc;
	}
	clk->ops = ops;
	clk->priv = priv;
	atomic_set(&clk->use_count, 0);
	mutex_init(&clk->lock);

	mutex_lock(&clk_lock);
	if (!IS_ERR(v4l2_clk_find(dev_id, id))) {
		mutex_unlock(&clk_lock);
		ret = -EEXIST;
		goto eexist;
	}
	list_add_tail(&clk->list, &clk_list);
	mutex_unlock(&clk_lock);

	return clk;

eexist:
ealloc:
	kfree(clk->id);
	kfree(clk->dev_id);
	kfree(clk);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(v4l2_clk_register);

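/*
 * v4l2_clk_unregister - remove a clock and free its resources
 * @clk: the clock to unregister
 *
 * Refuses (with a WARN) to unregister a clock whose use count is non-zero,
 * i.e. one that consumers still hold references to.
 */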
void v4l2_clk_unregister(struct v4l2_clk *clk)
{
	if (WARN(atomic_read(&clk->use_count),
		 "%s(): Refusing to unregister ref-counted %s:%s clock!\n",
		 __func__, clk->dev_id, clk->id))
		return;

	mutex_lock(&clk_lock);
	list_del(&clk->list);
	mutex_unlock(&clk_lock);

	kfree(clk->id);
	kfree(clk->dev_id);
	kfree(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister);

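/*
 * Helpers for fixed-rate clocks: the only supported operation is
 * .get_rate(), which reports the rate given at registration time.
 */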
struct v4l2_clk_fixed {
	unsigned long rate;
	struct v4l2_clk_ops ops;
};

static unsigned long fixed_get_rate(struct v4l2_clk *clk)
{
	struct v4l2_clk_fixed *priv = clk->priv;

	return priv->rate;
}

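/*
 * __v4l2_clk_register_fixed - register a fixed-rate clock
 * @dev_id: name of the consumer device this clock is connected to
 * @id: connection id, may be NULL
 * @rate: the fixed rate in Hz
 * @owner: module providing the clock, normally THIS_MODULE
 *
 * Returns the new clock or an ERR_PTR() on failure.
 */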
struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
		const char *id, unsigned long rate, struct module *owner)
{
	struct v4l2_clk *clk;
	struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->rate = rate;
	priv->ops.get_rate = fixed_get_rate;
	priv->ops.owner = owner;

	clk = v4l2_clk_register(&priv->ops, dev_id, id, priv);
	if (IS_ERR(clk))
		kfree(priv);

	return clk;
}
EXPORT_SYMBOL(__v4l2_clk_register_fixed);

void v4l2_clk_unregister_fixed(struct v4l2_clk *clk)
{
	kfree(clk->priv);
	v4l2_clk_unregister(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister_fixed);
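
/*
 * Illustrative usage sketch (not part of this file): a clock provider,
 * e.g. a camera host driver, registers a clock for its sensor, and the
 * sensor driver consumes it through the v4l2_clk API.  The device name
 * "1-0010" and the ops structure "mycam_clk_ops" below are hypothetical
 * examples; only the v4l2_clk_* calls are real.
 *
 *	// provider side
 *	struct v4l2_clk *clk = v4l2_clk_register(&mycam_clk_ops,
 *						 "1-0010", "mclk", host);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	// consumer side, e.g. in the sensor driver's probe()
 *	struct v4l2_clk *mclk = v4l2_clk_get(&client->dev, "mclk");
 *	if (IS_ERR(mclk))
 *		return PTR_ERR(mclk);
 *	ret = v4l2_clk_enable(mclk);
 *	...
 *	v4l2_clk_disable(mclk);
 *	v4l2_clk_put(mclk);
 */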