// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/clk/clkdev.c
 *
 *  Copyright (C) 2008 Russell King.
 *
 * Helper for the clk API to assist looking up a struct clk.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>

#include "clk.h"

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);

/*
 * Find the correct struct clk for the device and connection ID.
 * We do slightly fuzzy matching here:
 *  An entry with a NULL ID is assumed to be a wildcard.
 *  If an entry has a device ID, it must match
 *  If an entry has a connection ID, it must match
 * Then we take the most specific entry - with the following
 * order of precedence: dev+con > dev only > con only.
 */
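/*
 * Illustrative sketch (not part of the original file): suppose two
 * hypothetical lookups are registered for the same device, one with a
 * device ID only and one with both a device and a connection ID:
 *
 *	static struct clk_lookup lookups[] = {
 *		CLKDEV_INIT("1c20000.mmc", NULL, &mmc_clk),
 *		CLKDEV_INIT("1c20000.mmc", "ahb", &ahb_clk),
 *	};
 *
 * clk_find("1c20000.mmc", "ahb") then returns the dev+con entry (score 3),
 * while clk_find("1c20000.mmc", NULL) returns the dev-only entry (score 2),
 * because the dev+con entry's con_id cannot match a NULL con_id and is
 * skipped. CLKDEV_INIT() comes from <linux/clkdev.h>; the names are made up.
 */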
static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
{
	struct clk_lookup *p, *cl = NULL;
	int match, best_found = 0, best_possible = 0;

	if (dev_id)
		best_possible += 2;
	if (con_id)
		best_possible += 1;

	lockdep_assert_held(&clocks_mutex);

	list_for_each_entry(p, &clocks, node) {
		match = 0;
		if (p->dev_id) {
			if (!dev_id || strcmp(p->dev_id, dev_id))
				continue;
			match += 2;
		}
		if (p->con_id) {
			if (!con_id || strcmp(p->con_id, con_id))
				continue;
			match += 1;
		}

		if (match > best_found) {
			cl = p;
			if (match != best_possible)
				best_found = match;
			else
				break;
		}
	}
	return cl;
}

struct clk_hw *clk_find_hw(const char *dev_id, const char *con_id)
{
	struct clk_lookup *cl;
	struct clk_hw *hw = ERR_PTR(-ENOENT);

	mutex_lock(&clocks_mutex);
	cl = clk_find(dev_id, con_id);
	if (cl)
		hw = cl->clk_hw;
	mutex_unlock(&clocks_mutex);

	return hw;
}

static struct clk *__clk_get_sys(struct device *dev, const char *dev_id,
				 const char *con_id)
{
	struct clk_hw *hw = clk_find_hw(dev_id, con_id);

	return clk_hw_create_clk(dev, hw, dev_id, con_id);
}

struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	return __clk_get_sys(NULL, dev_id, con_id);
}
EXPORT_SYMBOL(clk_get_sys);

struct clk *clk_get(struct device *dev, const char *con_id)
{
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct clk_hw *hw;

	if (dev && dev->of_node) {
		hw = of_clk_get_hw(dev->of_node, 0, con_id);
		if (!IS_ERR(hw) || PTR_ERR(hw) == -EPROBE_DEFER)
			return clk_hw_create_clk(dev, hw, dev_id, con_id);
	}

	return __clk_get_sys(dev, dev_id, con_id);
}
EXPORT_SYMBOL(clk_get);
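
/*
 * Consumer-side sketch (illustrative, not part of this file): a
 * hypothetical driver probe usually pairs clk_get() with
 * clk_prepare_enable() and drops the reference with clk_put():
 *
 *	struct clk *bus_clk = clk_get(&pdev->dev, "bus");
 *
 *	if (IS_ERR(bus_clk))
 *		return PTR_ERR(bus_clk);
 *	clk_prepare_enable(bus_clk);
 *	...
 *	clk_disable_unprepare(bus_clk);
 *	clk_put(bus_clk);
 *
 * "pdev" and the "bus" connection ID are assumptions for the example;
 * devm_clk_get() is the usual managed alternative.
 */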

void clk_put(struct clk *clk)
{
	__clk_put(clk);
}
EXPORT_SYMBOL(clk_put);

static void __clkdev_add(struct clk_lookup *cl)
{
	mutex_lock(&clocks_mutex);
	list_add_tail(&cl->node, &clocks);
	mutex_unlock(&clocks_mutex);
}

void clkdev_add(struct clk_lookup *cl)
{
	if (!cl->clk_hw)
		cl->clk_hw = __clk_get_hw(cl->clk);
	__clkdev_add(cl);
}
EXPORT_SYMBOL(clkdev_add);

void clkdev_add_table(struct clk_lookup *cl, size_t num)
{
	mutex_lock(&clocks_mutex);
	while (num--) {
		cl->clk_hw = __clk_get_hw(cl->clk);
		list_add_tail(&cl->node, &clocks);
		cl++;
	}
	mutex_unlock(&clocks_mutex);
}
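
/*
 * Illustrative sketch (invented names): legacy platform code typically
 * feeds clkdev_add_table() a static array built with CLKDEV_INIT() from
 * <linux/clkdev.h>:
 *
 *	static struct clk_lookup board_lookups[] = {
 *		CLKDEV_INIT("serial0", "uartclk", &uart_clk),
 *		CLKDEV_INIT(NULL, "apb_pclk", &apb_pclk),
 *	};
 *
 *	clkdev_add_table(board_lookups, ARRAY_SIZE(board_lookups));
 *
 * Unlike clkdev_create(), the table entries are linked in place rather
 * than copied, so they must have static storage duration.
 */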

#define MAX_DEV_ID	20
#define MAX_CON_ID	16

struct clk_lookup_alloc {
	struct clk_lookup cl;
	char	dev_id[MAX_DEV_ID];
	char	con_id[MAX_CON_ID];
};

static struct clk_lookup * __ref
vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
	va_list ap)
{
	struct clk_lookup_alloc *cla;

	cla = kzalloc(sizeof(*cla), GFP_KERNEL);
	if (!cla)
		return NULL;

	cla->cl.clk_hw = hw;
	if (con_id) {
		strlcpy(cla->con_id, con_id, sizeof(cla->con_id));
		cla->cl.con_id = cla->con_id;
	}

	if (dev_fmt) {
		vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);
		cla->cl.dev_id = cla->dev_id;
	}

	return &cla->cl;
}

static struct clk_lookup *
vclkdev_create(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
	va_list ap)
{
	struct clk_lookup *cl;

	cl = vclkdev_alloc(hw, con_id, dev_fmt, ap);
	if (cl)
		__clkdev_add(cl);

	return cl;
}

struct clk_lookup * __ref
clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...)
{
	struct clk_lookup *cl;
	va_list ap;

	va_start(ap, dev_fmt);
	cl = vclkdev_alloc(__clk_get_hw(clk), con_id, dev_fmt, ap);
	va_end(ap);

	return cl;
}
EXPORT_SYMBOL(clkdev_alloc);

struct clk_lookup *
clkdev_hw_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt, ...)
{
	struct clk_lookup *cl;
	va_list ap;

	va_start(ap, dev_fmt);
	cl = vclkdev_alloc(hw, con_id, dev_fmt, ap);
	va_end(ap);

	return cl;
}
EXPORT_SYMBOL(clkdev_hw_alloc);

/**
 * clkdev_create - allocate and add a clkdev lookup structure
 * @clk: struct clk to associate with all clk_lookups
 * @con_id: connection ID string on device
 * @dev_fmt: format string describing device name
 *
 * Returns a clk_lookup structure, which can be later unregistered and
 * freed.
 */
struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
	const char *dev_fmt, ...)
{
	struct clk_lookup *cl;
	va_list ap;

	va_start(ap, dev_fmt);
	cl = vclkdev_create(__clk_get_hw(clk), con_id, dev_fmt, ap);
	va_end(ap);

	return cl;
}
EXPORT_SYMBOL_GPL(clkdev_create);
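
/*
 * Usage sketch (illustrative, made-up names): a provider that hands out
 * struct clk pointers can publish a lookup for one consumer device and
 * remove it again later:
 *
 *	struct clk_lookup *cl;
 *
 *	cl = clkdev_create(my_clk, "baudclk", "%s.%d", "ff000000.serial", 0);
 *	if (!cl)
 *		return -ENOMEM;
 *	...
 *	clkdev_drop(cl);
 *
 * dev_fmt is a printf()-style format; the resulting device ID is
 * truncated to MAX_DEV_ID bytes by vclkdev_alloc().
 */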

/**
 * clkdev_hw_create - allocate and add a clkdev lookup structure
 * @hw: struct clk_hw to associate with all clk_lookups
 * @con_id: connection ID string on device
 * @dev_fmt: format string describing device name
 *
 * Returns a clk_lookup structure, which can be later unregistered and
 * freed.
 */
struct clk_lookup *clkdev_hw_create(struct clk_hw *hw, const char *con_id,
	const char *dev_fmt, ...)
{
	struct clk_lookup *cl;
	va_list ap;

	va_start(ap, dev_fmt);
	cl = vclkdev_create(hw, con_id, dev_fmt, ap);
	va_end(ap);

	return cl;
}
EXPORT_SYMBOL_GPL(clkdev_hw_create);
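
/*
 * Usage sketch (illustrative, hypothetical names): clk provider drivers
 * that only have a struct clk_hw can create the same kind of lookup
 * without first materializing a struct clk:
 *
 *	if (!clkdev_hw_create(&priv->pll_hw, NULL, "%s", dev_name(dev)))
 *		return -ENOMEM;
 *
 * Here priv->pll_hw is a hypothetical clk_hw embedded in driver data; a
 * NULL con_id acts as a wildcard for that device.
 */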

int clk_add_alias(const char *alias, const char *alias_dev_name,
	const char *con_id, struct device *dev)
{
	struct clk *r = clk_get(dev, con_id);
	struct clk_lookup *l;

	if (IS_ERR(r))
		return PTR_ERR(r);

	l = clkdev_create(r, alias, alias_dev_name ? "%s" : NULL,
			  alias_dev_name);
	clk_put(r);

	return l ? 0 : -ENODEV;
}
EXPORT_SYMBOL(clk_add_alias);
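
/*
 * Usage sketch (illustrative, invented names): clk_add_alias() makes an
 * existing clock visible under a second (dev_id, con_id) pair, e.g.
 * exposing the clock currently bound to uart0_dev as "debug_uart"'s
 * "uartclk":
 *
 *	clk_add_alias("uartclk", "debug_uart", NULL, uart0_dev);
 *
 * The new lookup stores the underlying clk_hw, so the temporary
 * clk_get()/clk_put() pair in the implementation above is sufficient.
 */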

/*
 * clkdev_drop - remove a dynamically allocated clkdev lookup
 */
void clkdev_drop(struct clk_lookup *cl)
{
	mutex_lock(&clocks_mutex);
	list_del(&cl->node);
	mutex_unlock(&clocks_mutex);
	kfree(cl);
}
EXPORT_SYMBOL(clkdev_drop);

static struct clk_lookup *__clk_register_clkdev(struct clk_hw *hw,
						const char *con_id,
						const char *dev_id, ...)
{
	struct clk_lookup *cl;
	va_list ap;

	va_start(ap, dev_id);
	cl = vclkdev_create(hw, con_id, dev_id, ap);
	va_end(ap);

	return cl;
}

static int do_clk_register_clkdev(struct clk_hw *hw,
	struct clk_lookup **cl, const char *con_id, const char *dev_id)
{
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	/*
	 * Since dev_id can be NULL, and NULL is handled specially, we must
	 * pass it as either a NULL format string, or with "%s".
	 */
	if (dev_id)
		*cl = __clk_register_clkdev(hw, con_id, "%s", dev_id);
	else
		*cl = __clk_register_clkdev(hw, con_id, NULL);

	return *cl ? 0 : -ENOMEM;
}

/**
 * clk_register_clkdev - register one clock lookup for a struct clk
 * @clk: struct clk to associate with all clk_lookups
 * @con_id: connection ID string on device
 * @dev_id: string describing device name
 *
 * con_id or dev_id may be NULL as a wildcard, just as in the rest of
 * clkdev.
 *
 * To make things easier for mass registration, we detect error clks
 * from a previous clk_register() call, and return the error code for
 * those.  This is to permit this function to be called immediately
 * after clk_register().
 */
int clk_register_clkdev(struct clk *clk, const char *con_id,
	const char *dev_id)
{
	struct clk_lookup *cl;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return do_clk_register_clkdev(__clk_get_hw(clk), &cl, con_id,
				      dev_id);
}
EXPORT_SYMBOL(clk_register_clkdev);
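
/*
 * Usage sketch (illustrative; clock and device names are invented): this
 * can be chained directly after clk_register() because error pointers
 * are passed through:
 *
 *	struct clk *clk = clk_register(dev, &my_gate.hw);
 *	int ret = clk_register_clkdev(clk, "ciu", "ff704000.mmc");
 *
 *	if (ret)
 *		return ret;
 *
 * If clk_register() failed, ret is simply that error code.
 */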

/**
 * clk_hw_register_clkdev - register one clock lookup for a struct clk_hw
 * @hw: struct clk_hw to associate with all clk_lookups
 * @con_id: connection ID string on device
 * @dev_id: format string describing device name
 *
 * con_id or dev_id may be NULL as a wildcard, just as in the rest of
 * clkdev.
 *
 * To make things easier for mass registration, we detect error clk_hws
 * from a previous clk_hw_register_*() call, and return the error code for
 * those.  This is to permit this function to be called immediately
 * after clk_hw_register_*().
 */
int clk_hw_register_clkdev(struct clk_hw *hw, const char *con_id,
	const char *dev_id)
{
	struct clk_lookup *cl;

	return do_clk_register_clkdev(hw, &cl, con_id, dev_id);
}
EXPORT_SYMBOL(clk_hw_register_clkdev);
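
/*
 * Usage sketch (illustrative, hypothetical names): the clk_hw-based
 * variant pairs with clk_hw_register() in the same way:
 *
 *	ret = clk_hw_register(dev, &priv->pll_hw);
 *	if (ret)
 *		return ret;
 *	return clk_hw_register_clkdev(&priv->pll_hw, "pll", NULL);
 *
 * A NULL dev_id makes the lookup match any device asking for the "pll"
 * connection ID.
 */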

static void devm_clkdev_release(struct device *dev, void *res)
{
	clkdev_drop(*(struct clk_lookup **)res);
}

static int devm_clk_match_clkdev(struct device *dev, void *res, void *data)
{
	struct clk_lookup **l = res;

	return *l == data;
}

/**
 * devm_clk_release_clkdev - Resource managed clkdev lookup release
 * @dev: device this lookup is bound to
 * @con_id: connection ID string on device
 * @dev_id: format string describing device name
 *
 * Drop the clkdev lookup created with devm_clk_hw_register_clkdev().
 * Normally this function will not need to be called and the resource
 * management code will ensure that the resource is freed.
 */
void devm_clk_release_clkdev(struct device *dev, const char *con_id,
			     const char *dev_id)
{
	struct clk_lookup *cl;
	int rval;

	mutex_lock(&clocks_mutex);
	cl = clk_find(dev_id, con_id);
	mutex_unlock(&clocks_mutex);

	WARN_ON(!cl);
	rval = devres_release(dev, devm_clkdev_release,
			      devm_clk_match_clkdev, cl);
	WARN_ON(rval);
}
EXPORT_SYMBOL(devm_clk_release_clkdev);
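
/*
 * Usage sketch (illustrative): this is only needed when a lookup created
 * by devm_clk_hw_register_clkdev() must be dropped before the device is
 * unbound, e.g. when tearing down an optional feature early:
 *
 *	devm_clk_release_clkdev(dev, "pll", NULL);
 *
 * using the same con_id and dev_id that were passed at registration time.
 */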

/**
 * devm_clk_hw_register_clkdev - managed clk lookup registration for clk_hw
 * @dev: device this lookup is bound to
 * @hw: struct clk_hw to associate with all clk_lookups
 * @con_id: connection ID string on device
 * @dev_id: format string describing device name
 *
 * con_id or dev_id may be NULL as a wildcard, just as in the rest of
 * clkdev.
 *
 * To make things easier for mass registration, we detect error clk_hws
 * from a previous clk_hw_register_*() call, and return the error code for
 * those.  This is to permit this function to be called immediately
 * after clk_hw_register_*().
 */
int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw,
				const char *con_id, const char *dev_id)
{
	int rval = -ENOMEM;
	struct clk_lookup **cl;

	cl = devres_alloc(devm_clkdev_release, sizeof(*cl), GFP_KERNEL);
	if (cl) {
		rval = do_clk_register_clkdev(hw, cl, con_id, dev_id);
		if (!rval)
			devres_add(dev, cl);
		else
			devres_free(cl);
	}
	return rval;
}
EXPORT_SYMBOL(devm_clk_hw_register_clkdev);
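
/*
 * Usage sketch (illustrative, hypothetical names): the devres-managed
 * variant is the usual choice in provider drivers, since the lookup is
 * dropped automatically when the device is unbound:
 *
 *	ret = devm_clk_hw_register(dev, &priv->pll_hw);
 *	if (ret)
 *		return ret;
 *	return devm_clk_hw_register_clkdev(dev, &priv->pll_hw, "pll", NULL);
 */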
448