1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
4  * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
5  *
6  * Standard functionality for the common clock API.  See Documentation/driver-api/clk.rst
7  */
8 
9 #include <linux/clk.h>
10 #include <linux/clk-provider.h>
11 #include <linux/clk/clk-conf.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/spinlock.h>
15 #include <linux/err.h>
16 #include <linux/list.h>
17 #include <linux/slab.h>
18 #include <linux/of.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/sched.h>
23 #include <linux/clkdev.h>
24 
25 #include "clk.h"
26 
27 static DEFINE_SPINLOCK(enable_lock);
28 static DEFINE_MUTEX(prepare_lock);
29 
30 static struct task_struct *prepare_owner;
31 static struct task_struct *enable_owner;
32 
33 static int prepare_refcnt;
34 static int enable_refcnt;
35 
36 static HLIST_HEAD(clk_root_list);
37 static HLIST_HEAD(clk_orphan_list);
38 static LIST_HEAD(clk_notifier_list);
39 
40 static struct hlist_head *all_lists[] = {
41 	&clk_root_list,
42 	&clk_orphan_list,
43 	NULL,
44 };
45 
46 /***    private data structures    ***/
47 
48 struct clk_parent_map {
49 	const struct clk_hw	*hw;
50 	struct clk_core		*core;
51 	const char		*fw_name;
52 	const char		*name;
53 	int			index;
54 };
55 
56 struct clk_core {
57 	const char		*name;
58 	const struct clk_ops	*ops;
59 	struct clk_hw		*hw;
60 	struct module		*owner;
61 	struct device		*dev;
62 	struct device_node	*of_node;
63 	struct clk_core		*parent;
64 	struct clk_parent_map	*parents;
65 	u8			num_parents;
66 	u8			new_parent_index;
67 	unsigned long		rate;
68 	unsigned long		req_rate;
69 	unsigned long		new_rate;
70 	struct clk_core		*new_parent;
71 	struct clk_core		*new_child;
72 	unsigned long		flags;
73 	bool			orphan;
74 	bool			rpm_enabled;
75 	bool			need_sync;
76 	bool			boot_enabled;
77 	unsigned int		enable_count;
78 	unsigned int		prepare_count;
79 	unsigned int		protect_count;
80 	unsigned long		min_rate;
81 	unsigned long		max_rate;
82 	unsigned long		accuracy;
83 	int			phase;
84 	struct clk_duty		duty;
85 	struct hlist_head	children;
86 	struct hlist_node	child_node;
87 	struct hlist_head	clks;
88 	unsigned int		notifier_count;
89 #ifdef CONFIG_DEBUG_FS
90 	struct dentry		*dentry;
91 	struct hlist_node	debug_node;
92 #endif
93 	struct kref		ref;
94 };
95 
96 #define CREATE_TRACE_POINTS
97 #include <trace/events/clk.h>
98 
99 struct clk {
100 	struct clk_core	*core;
101 	struct device *dev;
102 	const char *dev_id;
103 	const char *con_id;
104 	unsigned long min_rate;
105 	unsigned long max_rate;
106 	unsigned int exclusive_count;
107 	struct hlist_node clks_node;
108 };
109 
110 /***           runtime pm          ***/
111 static int clk_pm_runtime_get(struct clk_core *core)
112 {
113 	int ret;
114 
115 	if (!core->rpm_enabled)
116 		return 0;
117 
118 	ret = pm_runtime_get_sync(core->dev);
119 	if (ret < 0) {
120 		pm_runtime_put_noidle(core->dev);
121 		return ret;
122 	}
123 	return 0;
124 }
125 
126 static void clk_pm_runtime_put(struct clk_core *core)
127 {
128 	if (!core->rpm_enabled)
129 		return;
130 
131 	pm_runtime_put_sync(core->dev);
132 }
133 
134 /***           locking             ***/
135 static void clk_prepare_lock(void)
136 {
137 	if (!mutex_trylock(&prepare_lock)) {
138 		if (prepare_owner == current) {
139 			prepare_refcnt++;
140 			return;
141 		}
142 		mutex_lock(&prepare_lock);
143 	}
144 	WARN_ON_ONCE(prepare_owner != NULL);
145 	WARN_ON_ONCE(prepare_refcnt != 0);
146 	prepare_owner = current;
147 	prepare_refcnt = 1;
148 }
149 
150 static void clk_prepare_unlock(void)
151 {
152 	WARN_ON_ONCE(prepare_owner != current);
153 	WARN_ON_ONCE(prepare_refcnt == 0);
154 
155 	if (--prepare_refcnt)
156 		return;
157 	prepare_owner = NULL;
158 	mutex_unlock(&prepare_lock);
159 }
160 
161 static unsigned long clk_enable_lock(void)
162 	__acquires(enable_lock)
163 {
164 	unsigned long flags;
165 
166 	/*
167 	 * On UP systems, spin_trylock_irqsave() always returns true, even if
168 	 * we already hold the lock. So, in that case, we rely only on
169 	 * reference counting.
170 	 */
171 	if (!IS_ENABLED(CONFIG_SMP) ||
172 	    !spin_trylock_irqsave(&enable_lock, flags)) {
173 		if (enable_owner == current) {
174 			enable_refcnt++;
175 			__acquire(enable_lock);
176 			if (!IS_ENABLED(CONFIG_SMP))
177 				local_save_flags(flags);
178 			return flags;
179 		}
180 		spin_lock_irqsave(&enable_lock, flags);
181 	}
182 	WARN_ON_ONCE(enable_owner != NULL);
183 	WARN_ON_ONCE(enable_refcnt != 0);
184 	enable_owner = current;
185 	enable_refcnt = 1;
186 	return flags;
187 }
188 
189 static void clk_enable_unlock(unsigned long flags)
190 	__releases(enable_lock)
191 {
192 	WARN_ON_ONCE(enable_owner != current);
193 	WARN_ON_ONCE(enable_refcnt == 0);
194 
195 	if (--enable_refcnt) {
196 		__release(enable_lock);
197 		return;
198 	}
199 	enable_owner = NULL;
200 	spin_unlock_irqrestore(&enable_lock, flags);
201 }
202 
203 static bool clk_core_rate_is_protected(struct clk_core *core)
204 {
205 	return core->protect_count;
206 }
207 
208 static bool clk_core_is_prepared(struct clk_core *core)
209 {
210 	bool ret = false;
211 
212 	/*
213 	 * .is_prepared is optional for clocks that can prepare
214 	 * fall back to software usage counter if it is missing
215 	 */
216 	if (!core->ops->is_prepared)
217 		return core->prepare_count;
218 
219 	if (!clk_pm_runtime_get(core)) {
220 		ret = core->ops->is_prepared(core->hw);
221 		clk_pm_runtime_put(core);
222 	}
223 
224 	return ret;
225 }
226 
227 static bool clk_core_is_enabled(struct clk_core *core)
228 {
229 	bool ret = false;
230 
231 	/*
232 	 * .is_enabled is only mandatory for clocks that gate
233 	 * fall back to software usage counter if .is_enabled is missing
234 	 */
235 	if (!core->ops->is_enabled)
236 		return core->enable_count;
237 
238 	/*
239 	 * Check if clock controller's device is runtime active before
240 	 * calling .is_enabled callback. If not, assume that clock is
241 	 * disabled, because we might be called from atomic context, from
242 	 * which pm_runtime_get() is not allowed.
243 	 * This function is called mainly from clk_disable_unused_subtree,
244 	 * which ensures proper runtime pm activation of controller before
245 	 * taking enable spinlock, but the below check is needed if one tries
246 	 * to call it from other places.
247 	 */
248 	if (core->rpm_enabled) {
249 		pm_runtime_get_noresume(core->dev);
250 		if (!pm_runtime_active(core->dev)) {
251 			ret = false;
252 			goto done;
253 		}
254 	}
255 
256 	ret = core->ops->is_enabled(core->hw);
257 done:
258 	if (core->rpm_enabled)
259 		pm_runtime_put(core->dev);
260 
261 	return ret;
262 }
263 
264 /***    helper functions   ***/
265 
266 const char *__clk_get_name(const struct clk *clk)
267 {
268 	return !clk ? NULL : clk->core->name;
269 }
270 EXPORT_SYMBOL_GPL(__clk_get_name);
271 
272 const char *clk_hw_get_name(const struct clk_hw *hw)
273 {
274 	return hw->core->name;
275 }
276 EXPORT_SYMBOL_GPL(clk_hw_get_name);
277 
278 struct clk_hw *__clk_get_hw(struct clk *clk)
279 {
280 	return !clk ? NULL : clk->core->hw;
281 }
282 EXPORT_SYMBOL_GPL(__clk_get_hw);
283 
284 unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
285 {
286 	return hw->core->num_parents;
287 }
288 EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);
289 
290 struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
291 {
292 	return hw->core->parent ? hw->core->parent->hw : NULL;
293 }
294 EXPORT_SYMBOL_GPL(clk_hw_get_parent);
295 
296 static struct clk_core *__clk_lookup_subtree(const char *name,
297 					     struct clk_core *core)
298 {
299 	struct clk_core *child;
300 	struct clk_core *ret;
301 
302 	if (!strcmp(core->name, name))
303 		return core;
304 
305 	hlist_for_each_entry(child, &core->children, child_node) {
306 		ret = __clk_lookup_subtree(name, child);
307 		if (ret)
308 			return ret;
309 	}
310 
311 	return NULL;
312 }
313 
314 static struct clk_core *clk_core_lookup(const char *name)
315 {
316 	struct clk_core *root_clk;
317 	struct clk_core *ret;
318 
319 	if (!name)
320 		return NULL;
321 
322 	/* search the 'proper' clk tree first */
323 	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
324 		ret = __clk_lookup_subtree(name, root_clk);
325 		if (ret)
326 			return ret;
327 	}
328 
329 	/* if not found, then search the orphan tree */
330 	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
331 		ret = __clk_lookup_subtree(name, root_clk);
332 		if (ret)
333 			return ret;
334 	}
335 
336 	return NULL;
337 }
338 
339 #ifdef CONFIG_OF
340 static int of_parse_clkspec(const struct device_node *np, int index,
341 			    const char *name, struct of_phandle_args *out_args);
342 static struct clk_hw *
343 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
344 #else
345 static inline int of_parse_clkspec(const struct device_node *np, int index,
346 				   const char *name,
347 				   struct of_phandle_args *out_args)
348 {
349 	return -ENOENT;
350 }
351 static inline struct clk_hw *
352 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
353 {
354 	return ERR_PTR(-ENOENT);
355 }
356 #endif
357 
358 /**
359  * clk_core_get - Find the clk_core parent of a clk
360  * @core: clk to find parent of
361  * @p_index: parent index to search for
362  *
363  * This is the preferred method for clk providers to find the parent of a
364  * clk when that parent is external to the clk controller. The parent_names
365  * array is indexed and treated as a local name matching a string in the device
366  * node's 'clock-names' property or as the 'con_id' matching the device's
367  * dev_name() in a clk_lookup. This allows clk providers to use their own
368  * namespace instead of looking for a globally unique parent string.
369  *
370  * For example the following DT snippet would allow a clock registered by the
371  * clock-controller@c001 that has a clk_init_data::parent_data array
372  * with 'xtal' in the 'name' member to find the clock provided by the
373  * clock-controller@f00abcd without needing to get the globally unique name of
374  * the xtal clk.
375  *
376  *      parent: clock-controller@f00abcd {
377  *              reg = <0xf00abcd 0xabcd>;
378  *              #clock-cells = <0>;
379  *      };
380  *
381  *      clock-controller@c001 {
382  *              reg = <0xc001 0xf00d>;
383  *              clocks = <&parent>;
384  *              clock-names = "xtal";
385  *              #clock-cells = <1>;
386  *      };
387  *
388  * Returns: -ENOENT when the provider can't be found or the clk doesn't
389  * exist in the provider or the name can't be found in the DT node or
390  * in a clkdev lookup. NULL when the provider knows about the clk but it
391  * isn't provided on this system.
392  * A valid clk_core pointer when the clk can be found in the provider.
393  */
394 static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
395 {
396 	const char *name = core->parents[p_index].fw_name;
397 	int index = core->parents[p_index].index;
398 	struct clk_hw *hw = ERR_PTR(-ENOENT);
399 	struct device *dev = core->dev;
400 	const char *dev_id = dev ? dev_name(dev) : NULL;
401 	struct device_node *np = core->of_node;
402 	struct of_phandle_args clkspec;
403 
404 	if (np && (name || index >= 0) &&
405 	    !of_parse_clkspec(np, index, name, &clkspec)) {
406 		hw = of_clk_get_hw_from_clkspec(&clkspec);
407 		of_node_put(clkspec.np);
408 	} else if (name) {
409 		/*
410 		 * If the DT search above couldn't find the provider fallback to
411 		 * looking up via clkdev based clk_lookups.
412 		 */
413 		hw = clk_find_hw(dev_id, name);
414 	}
415 
416 	if (IS_ERR(hw))
417 		return ERR_CAST(hw);
418 
419 	return hw->core;
420 }
421 
422 static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
423 {
424 	struct clk_parent_map *entry = &core->parents[index];
425 	struct clk_core *parent;
426 
427 	if (entry->hw) {
428 		parent = entry->hw->core;
429 		/*
430 		 * We have a direct reference but it isn't registered yet?
431 		 * Orphan it and let clk_reparent() update the orphan status
432 		 * when the parent is registered.
433 		 */
434 		if (!parent)
435 			parent = ERR_PTR(-EPROBE_DEFER);
436 	} else {
437 		parent = clk_core_get(core, index);
438 		if (PTR_ERR(parent) == -ENOENT && entry->name)
439 			parent = clk_core_lookup(entry->name);
440 	}
441 
442 	/* Only cache it if it's not an error */
443 	if (!IS_ERR(parent))
444 		entry->core = parent;
445 }
446 
447 static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
448 							 u8 index)
449 {
450 	if (!core || index >= core->num_parents || !core->parents)
451 		return NULL;
452 
453 	if (!core->parents[index].core)
454 		clk_core_fill_parent_index(core, index);
455 
456 	return core->parents[index].core;
457 }
458 
459 struct clk_hw *
460 clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
461 {
462 	struct clk_core *parent;
463 
464 	parent = clk_core_get_parent_by_index(hw->core, index);
465 
466 	return !parent ? NULL : parent->hw;
467 }
468 EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);
469 
470 unsigned int __clk_get_enable_count(struct clk *clk)
471 {
472 	return !clk ? 0 : clk->core->enable_count;
473 }
474 
475 static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
476 {
477 	if (!core)
478 		return 0;
479 
480 	if (!core->num_parents || core->parent)
481 		return core->rate;
482 
483 	/*
484 	 * Clk must have a parent because num_parents > 0 but the parent isn't
485 	 * known yet. Best to return 0 as the rate of this clk until we can
486 	 * properly recalc the rate based on the parent's rate.
487 	 */
488 	return 0;
489 }
490 
491 unsigned long clk_hw_get_rate(const struct clk_hw *hw)
492 {
493 	return clk_core_get_rate_nolock(hw->core);
494 }
495 EXPORT_SYMBOL_GPL(clk_hw_get_rate);
496 
497 static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
498 {
499 	if (!core)
500 		return 0;
501 
502 	return core->accuracy;
503 }
504 
505 unsigned long clk_hw_get_flags(const struct clk_hw *hw)
506 {
507 	return hw->core->flags;
508 }
509 EXPORT_SYMBOL_GPL(clk_hw_get_flags);
510 
511 bool clk_hw_is_prepared(const struct clk_hw *hw)
512 {
513 	return clk_core_is_prepared(hw->core);
514 }
515 EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
516 
517 bool clk_hw_rate_is_protected(const struct clk_hw *hw)
518 {
519 	return clk_core_rate_is_protected(hw->core);
520 }
521 EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
522 
523 bool clk_hw_is_enabled(const struct clk_hw *hw)
524 {
525 	return clk_core_is_enabled(hw->core);
526 }
527 EXPORT_SYMBOL_GPL(clk_hw_is_enabled);
528 
529 bool __clk_is_enabled(struct clk *clk)
530 {
531 	if (!clk)
532 		return false;
533 
534 	return clk_core_is_enabled(clk->core);
535 }
536 EXPORT_SYMBOL_GPL(__clk_is_enabled);
537 
538 static bool mux_is_better_rate(unsigned long rate, unsigned long now,
539 			   unsigned long best, unsigned long flags)
540 {
541 	if (flags & CLK_MUX_ROUND_CLOSEST)
542 		return abs(now - rate) < abs(best - rate);
543 
544 	return now <= rate && now > best;
545 }
546 
547 int clk_mux_determine_rate_flags(struct clk_hw *hw,
548 				 struct clk_rate_request *req,
549 				 unsigned long flags)
550 {
551 	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
552 	int i, num_parents, ret;
553 	unsigned long best = 0;
554 	struct clk_rate_request parent_req = *req;
555 
556 	/* if NO_REPARENT flag set, pass through to current parent */
557 	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
558 		parent = core->parent;
559 		if (core->flags & CLK_SET_RATE_PARENT) {
560 			ret = __clk_determine_rate(parent ? parent->hw : NULL,
561 						   &parent_req);
562 			if (ret)
563 				return ret;
564 
565 			best = parent_req.rate;
566 		} else if (parent) {
567 			best = clk_core_get_rate_nolock(parent);
568 		} else {
569 			best = clk_core_get_rate_nolock(core);
570 		}
571 
572 		goto out;
573 	}
574 
575 	/* find the parent that can provide the fastest rate <= rate */
576 	num_parents = core->num_parents;
577 	for (i = 0; i < num_parents; i++) {
578 		parent = clk_core_get_parent_by_index(core, i);
579 		if (!parent)
580 			continue;
581 
582 		if (core->flags & CLK_SET_RATE_PARENT) {
583 			parent_req = *req;
584 			ret = __clk_determine_rate(parent->hw, &parent_req);
585 			if (ret)
586 				continue;
587 		} else {
588 			parent_req.rate = clk_core_get_rate_nolock(parent);
589 		}
590 
591 		if (mux_is_better_rate(req->rate, parent_req.rate,
592 				       best, flags)) {
593 			best_parent = parent;
594 			best = parent_req.rate;
595 		}
596 	}
597 
598 	if (!best_parent)
599 		return -EINVAL;
600 
601 out:
602 	if (best_parent)
603 		req->best_parent_hw = best_parent->hw;
604 	req->best_parent_rate = best;
605 	req->rate = best;
606 
607 	return 0;
608 }
609 EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
610 
611 struct clk *__clk_lookup(const char *name)
612 {
613 	struct clk_core *core = clk_core_lookup(name);
614 
615 	return !core ? NULL : core->hw->clk;
616 }
617 
618 static void clk_core_get_boundaries(struct clk_core *core,
619 				    unsigned long *min_rate,
620 				    unsigned long *max_rate)
621 {
622 	struct clk *clk_user;
623 
624 	lockdep_assert_held(&prepare_lock);
625 
626 	*min_rate = core->min_rate;
627 	*max_rate = core->max_rate;
628 
629 	hlist_for_each_entry(clk_user, &core->clks, clks_node)
630 		*min_rate = max(*min_rate, clk_user->min_rate);
631 
632 	hlist_for_each_entry(clk_user, &core->clks, clks_node)
633 		*max_rate = min(*max_rate, clk_user->max_rate);
634 }
635 
636 static bool clk_core_check_boundaries(struct clk_core *core,
637 				      unsigned long min_rate,
638 				      unsigned long max_rate)
639 {
640 	struct clk *user;
641 
642 	lockdep_assert_held(&prepare_lock);
643 
644 	if (min_rate > core->max_rate || max_rate < core->min_rate)
645 		return false;
646 
647 	hlist_for_each_entry(user, &core->clks, clks_node)
648 		if (min_rate > user->max_rate || max_rate < user->min_rate)
649 			return false;
650 
651 	return true;
652 }
653 
654 void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
655 			   unsigned long max_rate)
656 {
657 	hw->core->min_rate = min_rate;
658 	hw->core->max_rate = max_rate;
659 }
660 EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
661 
662 /*
663  * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
664  * @hw: mux type clk to determine rate on
665  * @req: rate request, also used to return preferred parent and frequencies
666  *
667  * Helper for finding best parent to provide a given frequency. This can be used
668  * directly as a determine_rate callback (e.g. for a mux), or from a more
669  * complex clock that may combine a mux with other operations.
670  *
671  * Returns: 0 on success, -EERROR value on error
672  */
673 int __clk_mux_determine_rate(struct clk_hw *hw,
674 			     struct clk_rate_request *req)
675 {
676 	return clk_mux_determine_rate_flags(hw, req, 0);
677 }
678 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
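/*
 * Illustrative provider-side sketch: as the kernel-doc above notes, this
 * helper can be used directly as a mux's determine_rate callback.  The
 * foo_mux_* callbacks below are hypothetical placeholders, not defined in
 * this file:
 *
 *	static const struct clk_ops foo_mux_ops = {
 *		.get_parent	= foo_mux_get_parent,
 *		.set_parent	= foo_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 */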
679 
680 int __clk_mux_determine_rate_closest(struct clk_hw *hw,
681 				     struct clk_rate_request *req)
682 {
683 	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
684 }
685 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
686 
687 /***        clk api        ***/
688 
689 static void clk_core_rate_unprotect(struct clk_core *core)
690 {
691 	lockdep_assert_held(&prepare_lock);
692 
693 	if (!core)
694 		return;
695 
696 	if (WARN(core->protect_count == 0,
697 	    "%s already unprotected\n", core->name))
698 		return;
699 
700 	if (--core->protect_count > 0)
701 		return;
702 
703 	clk_core_rate_unprotect(core->parent);
704 }
705 
706 static int clk_core_rate_nuke_protect(struct clk_core *core)
707 {
708 	int ret;
709 
710 	lockdep_assert_held(&prepare_lock);
711 
712 	if (!core)
713 		return -EINVAL;
714 
715 	if (core->protect_count == 0)
716 		return 0;
717 
718 	ret = core->protect_count;
719 	core->protect_count = 1;
720 	clk_core_rate_unprotect(core);
721 
722 	return ret;
723 }
724 
725 /**
726  * clk_rate_exclusive_put - release exclusivity over clock rate control
727  * @clk: the clk over which the exclusivity is released
728  *
729  * clk_rate_exclusive_put() completes a critical section during which a clock
730  * consumer cannot tolerate any other consumer making any operation on the
731  * clock which could result in a rate change or rate glitch. Exclusive clocks
732  * cannot have their rate changed, either directly or indirectly due to changes
733  * further up the parent chain of clocks. As a result, clocks up parent chain
734  * also get under exclusive control of the calling consumer.
735  *
736  * If exclusivity is claimed more than once on a clock, even by the same consumer,
737  * the rate effectively gets locked as exclusivity can't be preempted.
738  *
739  * Calls to clk_rate_exclusive_put() must be balanced with calls to
740  * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
741  * error status.
742  */
743 void clk_rate_exclusive_put(struct clk *clk)
744 {
745 	if (!clk)
746 		return;
747 
748 	clk_prepare_lock();
749 
750 	/*
751 	 * if there is something wrong with this consumer protect count, stop
752 	 * here before messing with the provider
753 	 */
754 	if (WARN_ON(clk->exclusive_count <= 0))
755 		goto out;
756 
757 	clk_core_rate_unprotect(clk->core);
758 	clk->exclusive_count--;
759 out:
760 	clk_prepare_unlock();
761 }
762 EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);
763 
764 static void clk_core_rate_protect(struct clk_core *core)
765 {
766 	lockdep_assert_held(&prepare_lock);
767 
768 	if (!core)
769 		return;
770 
771 	if (core->protect_count == 0)
772 		clk_core_rate_protect(core->parent);
773 
774 	core->protect_count++;
775 }
776 
777 static void clk_core_rate_restore_protect(struct clk_core *core, int count)
778 {
779 	lockdep_assert_held(&prepare_lock);
780 
781 	if (!core)
782 		return;
783 
784 	if (count == 0)
785 		return;
786 
787 	clk_core_rate_protect(core);
788 	core->protect_count = count;
789 }
790 
791 /**
792  * clk_rate_exclusive_get - get exclusivity over the clk rate control
793  * @clk: the clk over which the exclusivity of rate control is requested
794  *
795  * clk_rate_exclusive_get() begins a critical section during which a clock
796  * consumer cannot tolerate any other consumer making any operation on the
797  * clock which could result in a rate change or rate glitch. Exclusive clocks
798  * cannot have their rate changed, either directly or indirectly due to changes
799  * further up the parent chain of clocks. As a result, clocks up parent chain
800  * also get under exclusive control of the calling consumer.
801  *
802  * If exclusivity is claimed more than once on a clock, even by the same consumer,
803  * the rate effectively gets locked as exclusivity can't be preempted.
804  *
805  * Calls to clk_rate_exclusive_get() should be balanced with calls to
806  * clk_rate_exclusive_put(). Calls to this function may sleep.
807  * Returns 0 on success, -EERROR otherwise
808  */
809 int clk_rate_exclusive_get(struct clk *clk)
810 {
811 	if (!clk)
812 		return 0;
813 
814 	clk_prepare_lock();
815 	clk_core_rate_protect(clk->core);
816 	clk->exclusive_count++;
817 	clk_prepare_unlock();
818 
819 	return 0;
820 }
821 EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
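/*
 * Illustrative consumer-side sketch of the exclusive-rate API above; "clk"
 * and "target_rate" come from the assumed surrounding driver context and
 * error unwinding is abbreviated:
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_set_rate(clk, target_rate);
 *
 *	clk_rate_exclusive_put(clk);
 *
 * Every successful clk_rate_exclusive_get() must eventually be balanced by
 * a clk_rate_exclusive_put() so that other consumers regain rate control.
 */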
822 
823 static void clk_core_unprepare(struct clk_core *core)
824 {
825 	lockdep_assert_held(&prepare_lock);
826 
827 	if (!core)
828 		return;
829 
830 	if (WARN(core->prepare_count == 0,
831 	    "%s already unprepared\n", core->name))
832 		return;
833 
834 	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
835 	    "Unpreparing critical %s\n", core->name))
836 		return;
837 
838 	if (core->flags & CLK_SET_RATE_GATE)
839 		clk_core_rate_unprotect(core);
840 
841 	if (--core->prepare_count > 0)
842 		return;
843 
844 	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);
845 
846 	trace_clk_unprepare(core);
847 
848 	if (core->ops->unprepare)
849 		core->ops->unprepare(core->hw);
850 
851 	trace_clk_unprepare_complete(core);
852 	clk_core_unprepare(core->parent);
853 	clk_pm_runtime_put(core);
854 }
855 
856 static void clk_core_unprepare_lock(struct clk_core *core)
857 {
858 	clk_prepare_lock();
859 	clk_core_unprepare(core);
860 	clk_prepare_unlock();
861 }
862 
863 /**
864  * clk_unprepare - undo preparation of a clock source
865  * @clk: the clk being unprepared
866  *
867  * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
868  * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
869  * if the operation may sleep.  One example is a clk which is accessed over
870  * I2C.  In the complex case a clk gate operation may require a fast and a slow
871  * part.  It is this reason that clk_unprepare and clk_disable are not mutually
872  * exclusive.  In fact clk_disable must be called before clk_unprepare.
873  */
874 void clk_unprepare(struct clk *clk)
875 {
876 	if (IS_ERR_OR_NULL(clk))
877 		return;
878 
879 	clk_core_unprepare_lock(clk->core);
880 }
881 EXPORT_SYMBOL_GPL(clk_unprepare);
882 
883 static int clk_core_prepare(struct clk_core *core)
884 {
885 	int ret = 0;
886 
887 	lockdep_assert_held(&prepare_lock);
888 
889 	if (!core)
890 		return 0;
891 
892 	if (core->prepare_count == 0) {
893 		ret = clk_pm_runtime_get(core);
894 		if (ret)
895 			return ret;
896 
897 		ret = clk_core_prepare(core->parent);
898 		if (ret)
899 			goto runtime_put;
900 
901 		trace_clk_prepare(core);
902 
903 		if (core->ops->prepare)
904 			ret = core->ops->prepare(core->hw);
905 
906 		trace_clk_prepare_complete(core);
907 
908 		if (ret)
909 			goto unprepare;
910 	}
911 
912 	core->prepare_count++;
913 
914 	/*
915 	 * CLK_SET_RATE_GATE is a special case of clock protection
916 	 * Instead of a consumer claiming exclusive rate control, it is
917 	 * actually the provider which prevents any consumer from making any
918 	 * operation which could result in a rate change or rate glitch while
919 	 * the clock is prepared.
920 	 */
921 	if (core->flags & CLK_SET_RATE_GATE)
922 		clk_core_rate_protect(core);
923 
924 	return 0;
925 unprepare:
926 	clk_core_unprepare(core->parent);
927 runtime_put:
928 	clk_pm_runtime_put(core);
929 	return ret;
930 }
931 
932 static int clk_core_prepare_lock(struct clk_core *core)
933 {
934 	int ret;
935 
936 	clk_prepare_lock();
937 	ret = clk_core_prepare(core);
938 	clk_prepare_unlock();
939 
940 	return ret;
941 }
942 
943 /**
944  * clk_prepare - prepare a clock source
945  * @clk: the clk being prepared
946  *
947  * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
948  * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
949  * operation may sleep.  One example is a clk which is accessed over I2C.  In
950  * the complex case a clk ungate operation may require a fast and a slow part.
951  * It is this reason that clk_prepare and clk_enable are not mutually
952  * exclusive.  In fact clk_prepare must be called before clk_enable.
953  * Returns 0 on success, -EERROR otherwise.
954  */
955 int clk_prepare(struct clk *clk)
956 {
957 	if (!clk)
958 		return 0;
959 
960 	return clk_core_prepare_lock(clk->core);
961 }
962 EXPORT_SYMBOL_GPL(clk_prepare);
963 
964 static void clk_core_disable(struct clk_core *core)
965 {
966 	lockdep_assert_held(&enable_lock);
967 
968 	if (!core)
969 		return;
970 
971 	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
972 		return;
973 
974 	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
975 	    "Disabling critical %s\n", core->name))
976 		return;
977 
978 	if (--core->enable_count > 0)
979 		return;
980 
981 	trace_clk_disable_rcuidle(core);
982 
983 	if (core->ops->disable)
984 		core->ops->disable(core->hw);
985 
986 	trace_clk_disable_complete_rcuidle(core);
987 
988 	clk_core_disable(core->parent);
989 }
990 
991 static void clk_core_disable_lock(struct clk_core *core)
992 {
993 	unsigned long flags;
994 
995 	flags = clk_enable_lock();
996 	clk_core_disable(core);
997 	clk_enable_unlock(flags);
998 }
999 
1000 /**
1001  * clk_disable - gate a clock
1002  * @clk: the clk being gated
1003  *
1004  * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
1005  * a simple case, clk_disable can be used instead of clk_unprepare to gate a
1006  * clk if the operation is fast and will never sleep.  One example is a
1007  * SoC-internal clk which is controlled via simple register writes.  In the
1008  * complex case a clk gate operation may require a fast and a slow part.  It is
1009  * this reason that clk_unprepare and clk_disable are not mutually exclusive.
1010  * In fact clk_disable must be called before clk_unprepare.
1011  */
1012 void clk_disable(struct clk *clk)
1013 {
1014 	if (IS_ERR_OR_NULL(clk))
1015 		return;
1016 
1017 	clk_core_disable_lock(clk->core);
1018 }
1019 EXPORT_SYMBOL_GPL(clk_disable);
1020 
1021 static int clk_core_enable(struct clk_core *core)
1022 {
1023 	int ret = 0;
1024 
1025 	lockdep_assert_held(&enable_lock);
1026 
1027 	if (!core)
1028 		return 0;
1029 
1030 	if (WARN(core->prepare_count == 0,
1031 	    "Enabling unprepared %s\n", core->name))
1032 		return -ESHUTDOWN;
1033 
1034 	if (core->enable_count == 0) {
1035 		ret = clk_core_enable(core->parent);
1036 
1037 		if (ret)
1038 			return ret;
1039 
1040 		trace_clk_enable_rcuidle(core);
1041 
1042 		if (core->ops->enable)
1043 			ret = core->ops->enable(core->hw);
1044 
1045 		trace_clk_enable_complete_rcuidle(core);
1046 
1047 		if (ret) {
1048 			clk_core_disable(core->parent);
1049 			return ret;
1050 		}
1051 	}
1052 
1053 	core->enable_count++;
1054 	return 0;
1055 }
1056 
1057 static int clk_core_enable_lock(struct clk_core *core)
1058 {
1059 	unsigned long flags;
1060 	int ret;
1061 
1062 	flags = clk_enable_lock();
1063 	ret = clk_core_enable(core);
1064 	clk_enable_unlock(flags);
1065 
1066 	return ret;
1067 }
1068 
1069 /**
1070  * clk_gate_restore_context - restore context for poweroff
1071  * @hw: the clk_hw pointer of clock whose state is to be restored
1072  *
1073  * The clock gate restore context function enables or disables
1074  * the gate clock based on its enable_count. This is used in cases
1075  * where the clock context is lost, for example across a power-off,
1076  * and the gate needs to be returned to its pre-loss state: enabled
1077  * if it still has users, disabled otherwise.
1078  */
1079 void clk_gate_restore_context(struct clk_hw *hw)
1080 {
1081 	struct clk_core *core = hw->core;
1082 
1083 	if (core->enable_count)
1084 		core->ops->enable(hw);
1085 	else
1086 		core->ops->disable(hw);
1087 }
1088 EXPORT_SYMBOL_GPL(clk_gate_restore_context);
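/*
 * Illustrative provider-side sketch: a gate clock whose register contents
 * are lost across a power-off can plug the helper above into its clk_ops as
 * the restore_context callback.  The foo_gate_* callbacks are hypothetical
 * placeholders:
 *
 *	static const struct clk_ops foo_gate_ops = {
 *		.enable		 = foo_gate_enable,
 *		.disable	 = foo_gate_disable,
 *		.is_enabled	 = foo_gate_is_enabled,
 *		.restore_context = clk_gate_restore_context,
 *	};
 */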
1089 
1090 static int clk_core_save_context(struct clk_core *core)
1091 {
1092 	struct clk_core *child;
1093 	int ret = 0;
1094 
1095 	hlist_for_each_entry(child, &core->children, child_node) {
1096 		ret = clk_core_save_context(child);
1097 		if (ret < 0)
1098 			return ret;
1099 	}
1100 
1101 	if (core->ops && core->ops->save_context)
1102 		ret = core->ops->save_context(core->hw);
1103 
1104 	return ret;
1105 }
1106 
1107 static void clk_core_restore_context(struct clk_core *core)
1108 {
1109 	struct clk_core *child;
1110 
1111 	if (core->ops && core->ops->restore_context)
1112 		core->ops->restore_context(core->hw);
1113 
1114 	hlist_for_each_entry(child, &core->children, child_node)
1115 		clk_core_restore_context(child);
1116 }
1117 
1118 /**
1119  * clk_save_context - save clock context for poweroff
1120  *
1121  * Saves the context of the clock register for powerstates in which the
1122  * contents of the registers will be lost. Occurs deep within the suspend
1123  * code.  Returns 0 on success.
1124  */
1125 int clk_save_context(void)
1126 {
1127 	struct clk_core *clk;
1128 	int ret;
1129 
1130 	hlist_for_each_entry(clk, &clk_root_list, child_node) {
1131 		ret = clk_core_save_context(clk);
1132 		if (ret < 0)
1133 			return ret;
1134 	}
1135 
1136 	hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
1137 		ret = clk_core_save_context(clk);
1138 		if (ret < 0)
1139 			return ret;
1140 	}
1141 
1142 	return 0;
1143 }
1144 EXPORT_SYMBOL_GPL(clk_save_context);
1145 
1146 /**
1147  * clk_restore_context - restore clock context after poweroff
1148  *
1149  * Restore the saved clock context upon resume.
1150  *
1151  */
1152 void clk_restore_context(void)
1153 {
1154 	struct clk_core *core;
1155 
1156 	hlist_for_each_entry(core, &clk_root_list, child_node)
1157 		clk_core_restore_context(core);
1158 
1159 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
1160 		clk_core_restore_context(core);
1161 }
1162 EXPORT_SYMBOL_GPL(clk_restore_context);
1163 
1164 /**
1165  * clk_enable - ungate a clock
1166  * @clk: the clk being ungated
1167  *
1168  * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
1169  * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
1170  * if the operation will never sleep.  One example is a SoC-internal clk which
1171  * is controlled via simple register writes.  In the complex case a clk ungate
1172  * operation may require a fast and a slow part.  It is this reason that
1173  * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
1174  * must be called before clk_enable.  Returns 0 on success, -EERROR
1175  * otherwise.
1176  */
1177 int clk_enable(struct clk *clk)
1178 {
1179 	if (!clk)
1180 		return 0;
1181 
1182 	return clk_core_enable_lock(clk->core);
1183 }
1184 EXPORT_SYMBOL_GPL(clk_enable);
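/*
 * Illustrative consumer-side sketch of the prepare/enable pairing described
 * above; "foo" is a hypothetical con_id and error unwinding is abbreviated:
 *
 *	clk = devm_clk_get(dev, "foo");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_prepare(clk);		(may sleep)
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_enable(clk);		(must not sleep)
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 *
 * Most consumers use the combined clk_prepare_enable() and
 * clk_disable_unprepare() helpers rather than calling the two halves
 * separately.
 */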
1185 
1186 /**
1187  * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
1188  * @clk: clock source
1189  *
1190  * Returns true if clk_prepare() implicitly enables the clock, effectively
1191  * making clk_enable()/clk_disable() no-ops, false otherwise.
1192  *
1193  * This is of interest mainly to power management code where actually
1194  * disabling the clock also requires unpreparing it to have any material
1195  * effect.
1196  *
1197  * Regardless of the value returned here, the caller must always invoke
1198  * clk_enable() or clk_prepare_enable()  and counterparts for usage counts
1199  * to be right.
1200  */
1201 bool clk_is_enabled_when_prepared(struct clk *clk)
1202 {
1203 	return clk && !(clk->core->ops->enable && clk->core->ops->disable);
1204 }
1205 EXPORT_SYMBOL_GPL(clk_is_enabled_when_prepared);
1206 
1207 static int clk_core_prepare_enable(struct clk_core *core)
1208 {
1209 	int ret;
1210 
1211 	ret = clk_core_prepare_lock(core);
1212 	if (ret)
1213 		return ret;
1214 
1215 	ret = clk_core_enable_lock(core);
1216 	if (ret)
1217 		clk_core_unprepare_lock(core);
1218 
1219 	return ret;
1220 }
1221 
1222 static void clk_core_disable_unprepare(struct clk_core *core)
1223 {
1224 	clk_core_disable_lock(core);
1225 	clk_core_unprepare_lock(core);
1226 }
1227 
1228 static void __init clk_unprepare_unused_subtree(struct clk_core *core)
1229 {
1230 	struct clk_core *child;
1231 
1232 	lockdep_assert_held(&prepare_lock);
1233 
1234 	hlist_for_each_entry(child, &core->children, child_node)
1235 		clk_unprepare_unused_subtree(child);
1236 
1237 	if (dev_has_sync_state(core->dev) &&
1238 	    !(core->flags & CLK_DONT_HOLD_STATE))
1239 		return;
1240 
1241 	if (core->prepare_count)
1242 		return;
1243 
1244 	if (core->flags & CLK_IGNORE_UNUSED)
1245 		return;
1246 
1247 	if (clk_pm_runtime_get(core))
1248 		return;
1249 
1250 	if (clk_core_is_prepared(core)) {
1251 		trace_clk_unprepare(core);
1252 		if (core->ops->unprepare_unused)
1253 			core->ops->unprepare_unused(core->hw);
1254 		else if (core->ops->unprepare)
1255 			core->ops->unprepare(core->hw);
1256 		trace_clk_unprepare_complete(core);
1257 	}
1258 
1259 	clk_pm_runtime_put(core);
1260 }
1261 
1262 static void __init clk_disable_unused_subtree(struct clk_core *core)
1263 {
1264 	struct clk_core *child;
1265 	unsigned long flags;
1266 
1267 	lockdep_assert_held(&prepare_lock);
1268 
1269 	hlist_for_each_entry(child, &core->children, child_node)
1270 		clk_disable_unused_subtree(child);
1271 
1272 	if (dev_has_sync_state(core->dev) &&
1273 	    !(core->flags & CLK_DONT_HOLD_STATE))
1274 		return;
1275 
1276 	if (core->flags & CLK_OPS_PARENT_ENABLE)
1277 		clk_core_prepare_enable(core->parent);
1278 
1279 	if (clk_pm_runtime_get(core))
1280 		goto unprepare_out;
1281 
1282 	flags = clk_enable_lock();
1283 
1284 	if (core->enable_count)
1285 		goto unlock_out;
1286 
1287 	if (core->flags & CLK_IGNORE_UNUSED)
1288 		goto unlock_out;
1289 
1290 	/*
1291 	 * some gate clocks have special needs during the disable-unused
1292 	 * sequence.  call .disable_unused if available, otherwise fall
1293 	 * back to .disable
1294 	 */
1295 	if (clk_core_is_enabled(core)) {
1296 		trace_clk_disable(core);
1297 		if (core->ops->disable_unused)
1298 			core->ops->disable_unused(core->hw);
1299 		else if (core->ops->disable)
1300 			core->ops->disable(core->hw);
1301 		trace_clk_disable_complete(core);
1302 	}
1303 
1304 unlock_out:
1305 	clk_enable_unlock(flags);
1306 	clk_pm_runtime_put(core);
1307 unprepare_out:
1308 	if (core->flags & CLK_OPS_PARENT_ENABLE)
1309 		clk_core_disable_unprepare(core->parent);
1310 }
1311 
1312 static bool clk_ignore_unused __initdata;
1313 static int __init clk_ignore_unused_setup(char *__unused)
1314 {
1315 	clk_ignore_unused = true;
1316 	return 1;
1317 }
1318 __setup("clk_ignore_unused", clk_ignore_unused_setup);
1319 
1320 static int __init clk_disable_unused(void)
1321 {
1322 	struct clk_core *core;
1323 
1324 	if (clk_ignore_unused) {
1325 		pr_warn("clk: Not disabling unused clocks\n");
1326 		return 0;
1327 	}
1328 
1329 	clk_prepare_lock();
1330 
1331 	hlist_for_each_entry(core, &clk_root_list, child_node)
1332 		clk_disable_unused_subtree(core);
1333 
1334 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
1335 		clk_disable_unused_subtree(core);
1336 
1337 	hlist_for_each_entry(core, &clk_root_list, child_node)
1338 		clk_unprepare_unused_subtree(core);
1339 
1340 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
1341 		clk_unprepare_unused_subtree(core);
1342 
1343 	clk_prepare_unlock();
1344 
1345 	return 0;
1346 }
1347 late_initcall_sync(clk_disable_unused);
1348 
1349 static void clk_unprepare_disable_dev_subtree(struct clk_core *core,
1350 					      struct device *dev)
1351 {
1352 	struct clk_core *child;
1353 
1354 	lockdep_assert_held(&prepare_lock);
1355 
1356 	hlist_for_each_entry(child, &core->children, child_node)
1357 		clk_unprepare_disable_dev_subtree(child, dev);
1358 
1359 	if (core->dev != dev || !core->need_sync)
1360 		return;
1361 
1362 	clk_core_disable_unprepare(core);
1363 }
1364 
1365 void clk_sync_state(struct device *dev)
1366 {
1367 	struct clk_core *core;
1368 
1369 	clk_prepare_lock();
1370 
1371 	hlist_for_each_entry(core, &clk_root_list, child_node)
1372 		clk_unprepare_disable_dev_subtree(core, dev);
1373 
1374 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
1375 		clk_unprepare_disable_dev_subtree(core, dev);
1376 
1377 	clk_prepare_unlock();
1378 }
1379 EXPORT_SYMBOL_GPL(clk_sync_state);
1380 
1381 static int clk_core_determine_round_nolock(struct clk_core *core,
1382 					   struct clk_rate_request *req)
1383 {
1384 	long rate;
1385 
1386 	lockdep_assert_held(&prepare_lock);
1387 
1388 	if (!core)
1389 		return 0;
1390 
1391 	/*
1392 	 * At this point, core protection will be disabled
1393 	 * - if the provider is not protected at all
1394 	 * - if the calling consumer is the only one which has exclusivity
1395 	 *   over the provider
1396 	 */
1397 	if (clk_core_rate_is_protected(core)) {
1398 		req->rate = core->rate;
1399 	} else if (core->ops->determine_rate) {
1400 		return core->ops->determine_rate(core->hw, req);
1401 	} else if (core->ops->round_rate) {
1402 		rate = core->ops->round_rate(core->hw, req->rate,
1403 					     &req->best_parent_rate);
1404 		if (rate < 0)
1405 			return rate;
1406 
1407 		req->rate = rate;
1408 	} else {
1409 		return -EINVAL;
1410 	}
1411 
1412 	return 0;
1413 }
1414 
1415 static void clk_core_init_rate_req(struct clk_core * const core,
1416 				   struct clk_rate_request *req)
1417 {
1418 	struct clk_core *parent;
1419 
1420 	if (WARN_ON(!core || !req))
1421 		return;
1422 
1423 	parent = core->parent;
1424 	if (parent) {
1425 		req->best_parent_hw = parent->hw;
1426 		req->best_parent_rate = parent->rate;
1427 	} else {
1428 		req->best_parent_hw = NULL;
1429 		req->best_parent_rate = 0;
1430 	}
1431 }
1432 
1433 static bool clk_core_can_round(struct clk_core * const core)
1434 {
1435 	return core->ops->determine_rate || core->ops->round_rate;
1436 }
1437 
1438 static int clk_core_round_rate_nolock(struct clk_core *core,
1439 				      struct clk_rate_request *req)
1440 {
1441 	lockdep_assert_held(&prepare_lock);
1442 
1443 	if (!core) {
1444 		req->rate = 0;
1445 		return 0;
1446 	}
1447 
1448 	clk_core_init_rate_req(core, req);
1449 
1450 	if (clk_core_can_round(core))
1451 		return clk_core_determine_round_nolock(core, req);
1452 	else if (core->flags & CLK_SET_RATE_PARENT)
1453 		return clk_core_round_rate_nolock(core->parent, req);
1454 
1455 	req->rate = core->rate;
1456 	return 0;
1457 }
1458 
1459 /**
1460  * __clk_determine_rate - get the closest rate actually supported by a clock
1461  * @hw: determine the rate of this clock
1462  * @req: target rate request
1463  *
1464  * Useful for clk_ops such as .set_rate and .determine_rate.
1465  */
1466 int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
1467 {
1468 	if (!hw) {
1469 		req->rate = 0;
1470 		return 0;
1471 	}
1472 
1473 	return clk_core_round_rate_nolock(hw->core, req);
1474 }
1475 EXPORT_SYMBOL_GPL(__clk_determine_rate);
1476 
1477 /**
1478  * clk_hw_round_rate() - round the given rate for a hw clk
1479  * @hw: the hw clk for which we are rounding a rate
1480  * @rate: the rate which is to be rounded
1481  *
1482  * Takes in a rate as input and rounds it to a rate that the clk can actually
1483  * use.
1484  *
1485  * Context: prepare_lock must be held.
1486  *          For clk providers to call from within clk_ops such as .round_rate,
1487  *          .determine_rate.
1488  *
1489  * Return: returns rounded rate of hw clk if clk supports round_rate operation
1490  *         else returns the parent rate.
1491  */
1492 unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
1493 {
1494 	int ret;
1495 	struct clk_rate_request req;
1496 
1497 	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
1498 	req.rate = rate;
1499 
1500 	ret = clk_core_round_rate_nolock(hw->core, &req);
1501 	if (ret)
1502 		return 0;
1503 
1504 	return req.rate;
1505 }
1506 EXPORT_SYMBOL_GPL(clk_hw_round_rate);
1507 
1508 /**
1509  * clk_round_rate - round the given rate for a clk
1510  * @clk: the clk for which we are rounding a rate
1511  * @rate: the rate which is to be rounded
1512  *
1513  * Takes in a rate as input and rounds it to a rate that the clk can actually
1514  * use which is then returned.  If clk doesn't support round_rate operation
1515  * then the parent rate is returned.
1516  */
1517 long clk_round_rate(struct clk *clk, unsigned long rate)
1518 {
1519 	struct clk_rate_request req;
1520 	int ret;
1521 
1522 	if (!clk)
1523 		return 0;
1524 
1525 	clk_prepare_lock();
1526 
1527 	if (clk->exclusive_count)
1528 		clk_core_rate_unprotect(clk->core);
1529 
1530 	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
1531 	req.rate = rate;
1532 
1533 	ret = clk_core_round_rate_nolock(clk->core, &req);
1534 
1535 	if (clk->exclusive_count)
1536 		clk_core_rate_protect(clk->core);
1537 
1538 	clk_prepare_unlock();
1539 
1540 	if (ret)
1541 		return ret;
1542 
1543 	return req.rate;
1544 }
1545 EXPORT_SYMBOL_GPL(clk_round_rate);
1546 
1547 /**
1548  * __clk_notify - call clk notifier chain
1549  * @core: clk that is changing rate
1550  * @msg: clk notifier type (see include/linux/clk.h)
1551  * @old_rate: old clk rate
1552  * @new_rate: new clk rate
1553  *
1554  * Triggers a notifier call chain on the clk rate-change notification
1555  * for 'clk'.  Passes a pointer to the struct clk and the previous
1556  * and current rates to the notifier callback.  Intended to be called by
1557  * internal clock code only.  Returns NOTIFY_DONE from the last driver
1558  * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
1559  * a driver returns that.
1560  */
1561 static int __clk_notify(struct clk_core *core, unsigned long msg,
1562 		unsigned long old_rate, unsigned long new_rate)
1563 {
1564 	struct clk_notifier *cn;
1565 	struct clk_notifier_data cnd;
1566 	int ret = NOTIFY_DONE;
1567 
1568 	cnd.old_rate = old_rate;
1569 	cnd.new_rate = new_rate;
1570 
1571 	list_for_each_entry(cn, &clk_notifier_list, node) {
1572 		if (cn->clk->core == core) {
1573 			cnd.clk = cn->clk;
1574 			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
1575 					&cnd);
1576 			if (ret & NOTIFY_STOP_MASK)
1577 				return ret;
1578 		}
1579 	}
1580 
1581 	return ret;
1582 }
1583 
1584 /**
1585  * __clk_recalc_accuracies
1586  * @core: first clk in the subtree
1587  *
1588  * Walks the subtree of clks starting with clk and recalculates accuracies as
1589  * it goes.  Note that if a clk does not implement the .recalc_accuracy
1590  * callback then it is assumed that the clock will take on the accuracy of its
1591  * parent.
1592  */
1593 static void __clk_recalc_accuracies(struct clk_core *core)
1594 {
1595 	unsigned long parent_accuracy = 0;
1596 	struct clk_core *child;
1597 
1598 	lockdep_assert_held(&prepare_lock);
1599 
1600 	if (core->parent)
1601 		parent_accuracy = core->parent->accuracy;
1602 
1603 	if (core->ops->recalc_accuracy)
1604 		core->accuracy = core->ops->recalc_accuracy(core->hw,
1605 							  parent_accuracy);
1606 	else
1607 		core->accuracy = parent_accuracy;
1608 
1609 	hlist_for_each_entry(child, &core->children, child_node)
1610 		__clk_recalc_accuracies(child);
1611 }
1612 
1613 static long clk_core_get_accuracy_recalc(struct clk_core *core)
1614 {
1615 	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
1616 		__clk_recalc_accuracies(core);
1617 
1618 	return clk_core_get_accuracy_no_lock(core);
1619 }
1620 
1621 /**
1622  * clk_get_accuracy - return the accuracy of clk
1623  * @clk: the clk whose accuracy is being returned
1624  *
1625  * Simply returns the cached accuracy of the clk, unless
1626  * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will be
1627  * issued.
1628  * If clk is NULL then returns 0.
1629  */
1630 long clk_get_accuracy(struct clk *clk)
1631 {
1632 	long accuracy;
1633 
1634 	if (!clk)
1635 		return 0;
1636 
1637 	clk_prepare_lock();
1638 	accuracy = clk_core_get_accuracy_recalc(clk->core);
1639 	clk_prepare_unlock();
1640 
1641 	return accuracy;
1642 }
1643 EXPORT_SYMBOL_GPL(clk_get_accuracy);
1644 
1645 static unsigned long clk_recalc(struct clk_core *core,
1646 				unsigned long parent_rate)
1647 {
1648 	unsigned long rate = parent_rate;
1649 
1650 	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
1651 		rate = core->ops->recalc_rate(core->hw, parent_rate);
1652 		clk_pm_runtime_put(core);
1653 	}
1654 	return rate;
1655 }
1656 
1657 /**
1658  * __clk_recalc_rates
1659  * @core: first clk in the subtree
1660  * @msg: notification type (see include/linux/clk.h)
1661  *
1662  * Walks the subtree of clks starting with clk and recalculates rates as it
1663  * goes.  Note that if a clk does not implement the .recalc_rate callback then
1664  * it is assumed that the clock will take on the rate of its parent.
1665  *
1666  * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
1667  * if necessary.
1668  */
1669 static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
1670 {
1671 	unsigned long old_rate;
1672 	unsigned long parent_rate = 0;
1673 	struct clk_core *child;
1674 
1675 	lockdep_assert_held(&prepare_lock);
1676 
1677 	old_rate = core->rate;
1678 
1679 	if (core->parent)
1680 		parent_rate = core->parent->rate;
1681 
1682 	core->rate = clk_recalc(core, parent_rate);
1683 
1684 	/*
1685 	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
1686 	 * & ABORT_RATE_CHANGE notifiers
1687 	 */
1688 	if (core->notifier_count && msg)
1689 		__clk_notify(core, msg, old_rate, core->rate);
1690 
1691 	hlist_for_each_entry(child, &core->children, child_node)
1692 		__clk_recalc_rates(child, msg);
1693 }
1694 
1695 static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
1696 {
1697 	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
1698 		__clk_recalc_rates(core, 0);
1699 
1700 	return clk_core_get_rate_nolock(core);
1701 }
1702 
1703 /**
1704  * clk_get_rate - return the rate of clk
1705  * @clk: the clk whose rate is being returned
1706  *
1707  * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1708  * is set, which means a recalc_rate will be issued.
1709  * If clk is NULL then returns 0.
1710  */
1711 unsigned long clk_get_rate(struct clk *clk)
1712 {
1713 	unsigned long rate;
1714 
1715 	if (!clk)
1716 		return 0;
1717 
1718 	clk_prepare_lock();
1719 	rate = clk_core_get_rate_recalc(clk->core);
1720 	clk_prepare_unlock();
1721 
1722 	return rate;
1723 }
1724 EXPORT_SYMBOL_GPL(clk_get_rate);
1725 
1726 static int clk_fetch_parent_index(struct clk_core *core,
1727 				  struct clk_core *parent)
1728 {
1729 	int i;
1730 
1731 	if (!parent)
1732 		return -EINVAL;
1733 
1734 	for (i = 0; i < core->num_parents; i++) {
1735 		/* Found it first try! */
1736 		if (core->parents[i].core == parent)
1737 			return i;
1738 
1739 		/* Something else is here, so keep looking */
1740 		if (core->parents[i].core)
1741 			continue;
1742 
1743 		/* Maybe core hasn't been cached but the hw is all we know? */
1744 		if (core->parents[i].hw) {
1745 			if (core->parents[i].hw == parent->hw)
1746 				break;
1747 
1748 			/* Didn't match, but we're expecting a clk_hw */
1749 			continue;
1750 		}
1751 
1752 		/* Maybe it hasn't been cached (clk_set_parent() path) */
1753 		if (parent == clk_core_get(core, i))
1754 			break;
1755 
1756 		/* Fallback to comparing globally unique names */
1757 		if (core->parents[i].name &&
1758 		    !strcmp(parent->name, core->parents[i].name))
1759 			break;
1760 	}
1761 
1762 	if (i == core->num_parents)
1763 		return -EINVAL;
1764 
1765 	core->parents[i].core = parent;
1766 	return i;
1767 }
1768 
1769 /**
1770  * clk_hw_get_parent_index - return the index of the parent clock
1771  * @hw: clk_hw associated with the clk being consumed
1772  *
1773  * Fetches and returns the index of parent clock. Returns -EINVAL if the given
1774  * clock does not have a current parent.
1775  */
1776 int clk_hw_get_parent_index(struct clk_hw *hw)
1777 {
1778 	struct clk_hw *parent = clk_hw_get_parent(hw);
1779 
1780 	if (WARN_ON(parent == NULL))
1781 		return -EINVAL;
1782 
1783 	return clk_fetch_parent_index(hw->core, parent->core);
1784 }
1785 EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);
1786 
1787 static void clk_core_hold_state(struct clk_core *core)
1788 {
1789 	if (core->need_sync || !core->boot_enabled)
1790 		return;
1791 
1792 	if (core->orphan || !dev_has_sync_state(core->dev))
1793 		return;
1794 
1795 	if (core->flags & CLK_DONT_HOLD_STATE)
1796 		return;
1797 
1798 	core->need_sync = !clk_core_prepare_enable(core);
1799 }
1800 
1801 static void __clk_core_update_orphan_hold_state(struct clk_core *core)
1802 {
1803 	struct clk_core *child;
1804 
1805 	if (core->orphan)
1806 		return;
1807 
1808 	clk_core_hold_state(core);
1809 
1810 	hlist_for_each_entry(child, &core->children, child_node)
1811 		__clk_core_update_orphan_hold_state(child);
1812 }
1813 
1814 /*
1815  * Update the orphan status of @core and all its children.
1816  */
1817 static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
1818 {
1819 	struct clk_core *child;
1820 
1821 	core->orphan = is_orphan;
1822 
1823 	hlist_for_each_entry(child, &core->children, child_node)
1824 		clk_core_update_orphan_status(child, is_orphan);
1825 }
1826 
1827 static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
1828 {
1829 	bool was_orphan = core->orphan;
1830 
1831 	hlist_del(&core->child_node);
1832 
1833 	if (new_parent) {
1834 		bool becomes_orphan = new_parent->orphan;
1835 
1836 		/* avoid duplicate POST_RATE_CHANGE notifications */
1837 		if (new_parent->new_child == core)
1838 			new_parent->new_child = NULL;
1839 
1840 		hlist_add_head(&core->child_node, &new_parent->children);
1841 
1842 		if (was_orphan != becomes_orphan)
1843 			clk_core_update_orphan_status(core, becomes_orphan);
1844 	} else {
1845 		hlist_add_head(&core->child_node, &clk_orphan_list);
1846 		if (!was_orphan)
1847 			clk_core_update_orphan_status(core, true);
1848 	}
1849 
1850 	core->parent = new_parent;
1851 }
1852 
1853 static struct clk_core *__clk_set_parent_before(struct clk_core *core,
1854 					   struct clk_core *parent)
1855 {
1856 	unsigned long flags;
1857 	struct clk_core *old_parent = core->parent;
1858 
1859 	/*
1860 	 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
1861 	 *
1862 	 * 2. Migrate prepare state between parents and prevent race with
1863 	 * clk_enable().
1864 	 *
1865 	 * If the clock is not prepared, then a race with
1866 	 * clk_enable/disable() is impossible since we already have the
1867 	 * prepare lock (future calls to clk_enable() need to be preceded by
1868 	 * a clk_prepare()).
1869 	 *
1870 	 * If the clock is prepared, migrate the prepared state to the new
1871 	 * parent and also protect against a race with clk_enable() by
1872 	 * forcing the clock and the new parent on.  This ensures that all
1873 	 * future calls to clk_enable() are practically NOPs with respect to
1874 	 * hardware and software states.
1875 	 *
1876 	 * See also: Comment for clk_set_parent() below.
1877 	 */
1878 
1879 	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
1880 	if (core->flags & CLK_OPS_PARENT_ENABLE) {
1881 		clk_core_prepare_enable(old_parent);
1882 		clk_core_prepare_enable(parent);
1883 	}
1884 
1885 	/* migrate prepare count if > 0 */
1886 	if (core->prepare_count) {
1887 		clk_core_prepare_enable(parent);
1888 		clk_core_enable_lock(core);
1889 	}
1890 
1891 	/* update the clk tree topology */
1892 	flags = clk_enable_lock();
1893 	clk_reparent(core, parent);
1894 	clk_enable_unlock(flags);
1895 
1896 	return old_parent;
1897 }
1898 
1899 static void __clk_set_parent_after(struct clk_core *core,
1900 				   struct clk_core *parent,
1901 				   struct clk_core *old_parent)
1902 {
1903 	/*
1904 	 * Finish the migration of prepare state and undo the changes done
1905 	 * for preventing a race with clk_enable().
1906 	 */
1907 	if (core->prepare_count) {
1908 		clk_core_disable_lock(core);
1909 		clk_core_disable_unprepare(old_parent);
1910 	}
1911 
1912 	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
1913 	if (core->flags & CLK_OPS_PARENT_ENABLE) {
1914 		clk_core_disable_unprepare(parent);
1915 		clk_core_disable_unprepare(old_parent);
1916 	}
1917 }
1918 
1919 static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
1920 			    u8 p_index)
1921 {
1922 	unsigned long flags;
1923 	int ret = 0;
1924 	struct clk_core *old_parent;
1925 
1926 	old_parent = __clk_set_parent_before(core, parent);
1927 
1928 	trace_clk_set_parent(core, parent);
1929 
1930 	/* change clock input source */
1931 	if (parent && core->ops->set_parent)
1932 		ret = core->ops->set_parent(core->hw, p_index);
1933 
1934 	trace_clk_set_parent_complete(core, parent);
1935 
1936 	if (ret) {
1937 		flags = clk_enable_lock();
1938 		clk_reparent(core, old_parent);
1939 		clk_enable_unlock(flags);
1940 		__clk_set_parent_after(core, old_parent, parent);
1941 
1942 		return ret;
1943 	}
1944 
1945 	__clk_set_parent_after(core, parent, old_parent);
1946 
1947 	return 0;
1948 }
1949 
1950 /**
1951  * __clk_speculate_rates
1952  * @core: first clk in the subtree
1953  * @parent_rate: the "future" rate of clk's parent
1954  *
1955  * Walks the subtree of clks starting with clk, speculating rates as it
1956  * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1957  *
1958  * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
1959  * pre-rate change notifications and returns early if no clks in the
1960  * subtree have subscribed to the notifications.  Note that if a clk does not
1961  * implement the .recalc_rate callback then it is assumed that the clock will
1962  * take on the rate of its parent.
1963  */
1964 static int __clk_speculate_rates(struct clk_core *core,
1965 				 unsigned long parent_rate)
1966 {
1967 	struct clk_core *child;
1968 	unsigned long new_rate;
1969 	int ret = NOTIFY_DONE;
1970 
1971 	lockdep_assert_held(&prepare_lock);
1972 
1973 	new_rate = clk_recalc(core, parent_rate);
1974 
1975 	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
1976 	if (core->notifier_count)
1977 		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
1978 
1979 	if (ret & NOTIFY_STOP_MASK) {
1980 		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1981 				__func__, core->name, ret);
1982 		goto out;
1983 	}
1984 
1985 	hlist_for_each_entry(child, &core->children, child_node) {
1986 		ret = __clk_speculate_rates(child, new_rate);
1987 		if (ret & NOTIFY_STOP_MASK)
1988 			break;
1989 	}
1990 
1991 out:
1992 	return ret;
1993 }
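
/*
 * For context, a consumer observes these speculated rates through a notifier
 * registered with clk_notifier_register(); returning NOTIFY_BAD from the
 * PRE_RATE_CHANGE notification vetoes the change.  A hedged sketch follows;
 * the callback name and the 200 MHz limit are hypothetical.
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE && ndata->new_rate > 200000000)
 *			return NOTIFY_BAD;
 *		return NOTIFY_OK;
 *	}
 */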
1994 
1995 static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
1996 			     struct clk_core *new_parent, u8 p_index)
1997 {
1998 	struct clk_core *child;
1999 
2000 	core->new_rate = new_rate;
2001 	core->new_parent = new_parent;
2002 	core->new_parent_index = p_index;
2003 	/* include clk in new parent's PRE_RATE_CHANGE notifications */
2004 	core->new_child = NULL;
2005 	if (new_parent && new_parent != core->parent)
2006 		new_parent->new_child = core;
2007 
2008 	hlist_for_each_entry(child, &core->children, child_node) {
2009 		child->new_rate = clk_recalc(child, new_rate);
2010 		clk_calc_subtree(child, child->new_rate, NULL, 0);
2011 	}
2012 }
2013 
2014 /*
2015  * calculate the new rates returning the topmost clock that has to be
2016  * changed.
2017  */
2018 static struct clk_core *clk_calc_new_rates(struct clk_core *core,
2019 					   unsigned long rate)
2020 {
2021 	struct clk_core *top = core;
2022 	struct clk_core *old_parent, *parent;
2023 	unsigned long best_parent_rate = 0;
2024 	unsigned long new_rate;
2025 	unsigned long min_rate;
2026 	unsigned long max_rate;
2027 	int p_index = 0;
2028 	long ret;
2029 
2030 	/* sanity */
2031 	if (IS_ERR_OR_NULL(core))
2032 		return NULL;
2033 
2034 	/* save parent rate, if it exists */
2035 	parent = old_parent = core->parent;
2036 	if (parent)
2037 		best_parent_rate = parent->rate;
2038 
2039 	clk_core_get_boundaries(core, &min_rate, &max_rate);
2040 
2041 	/* find the closest rate and parent clk/rate */
2042 	if (clk_core_can_round(core)) {
2043 		struct clk_rate_request req;
2044 
2045 		req.rate = rate;
2046 		req.min_rate = min_rate;
2047 		req.max_rate = max_rate;
2048 
2049 		clk_core_init_rate_req(core, &req);
2050 
2051 		ret = clk_core_determine_round_nolock(core, &req);
2052 		if (ret < 0)
2053 			return NULL;
2054 
2055 		best_parent_rate = req.best_parent_rate;
2056 		new_rate = req.rate;
2057 		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
2058 
2059 		if (new_rate < min_rate || new_rate > max_rate)
2060 			return NULL;
2061 	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
2062 		/* pass-through clock without adjustable parent */
2063 		core->new_rate = core->rate;
2064 		return NULL;
2065 	} else {
2066 		/* pass-through clock with adjustable parent */
2067 		top = clk_calc_new_rates(parent, rate);
2068 		new_rate = parent->new_rate;
2069 		goto out;
2070 	}
2071 
2072 	/* some clocks must be gated to change parent */
2073 	if (parent != old_parent &&
2074 	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
2075 		pr_debug("%s: %s not gated but wants to reparent\n",
2076 			 __func__, core->name);
2077 		return NULL;
2078 	}
2079 
2080 	/* try finding the new parent index */
2081 	if (parent && core->num_parents > 1) {
2082 		p_index = clk_fetch_parent_index(core, parent);
2083 		if (p_index < 0) {
2084 			pr_debug("%s: clk %s can not be parent of clk %s\n",
2085 				 __func__, parent->name, core->name);
2086 			return NULL;
2087 		}
2088 	}
2089 
2090 	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
2091 	    best_parent_rate != parent->rate)
2092 		top = clk_calc_new_rates(parent, best_parent_rate);
2093 
2094 out:
2095 	clk_calc_subtree(core, new_rate, parent, p_index);
2096 
2097 	return top;
2098 }
2099 
2100 /*
2101  * Notify about rate changes in a subtree. Always walk down the whole tree
2102  * so that in case of an error we can walk down the whole tree again and
2103  * abort the change.
2104  */
2105 static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
2106 						  unsigned long event)
2107 {
2108 	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
2109 	int ret = NOTIFY_DONE;
2110 
2111 	if (core->rate == core->new_rate)
2112 		return NULL;
2113 
2114 	if (core->notifier_count) {
2115 		ret = __clk_notify(core, event, core->rate, core->new_rate);
2116 		if (ret & NOTIFY_STOP_MASK)
2117 			fail_clk = core;
2118 	}
2119 
2120 	if (core->ops->pre_rate_change) {
2121 		ret = core->ops->pre_rate_change(core->hw, core->rate,
2122 						 core->new_rate);
2123 		if (ret)
2124 			fail_clk = core;
2125 	}
2126 
2127 	hlist_for_each_entry(child, &core->children, child_node) {
2128 		/* Skip children who will be reparented to another clock */
2129 		if (child->new_parent && child->new_parent != core)
2130 			continue;
2131 		tmp_clk = clk_propagate_rate_change(child, event);
2132 		if (tmp_clk)
2133 			fail_clk = tmp_clk;
2134 	}
2135 
2136 	/* handle the new child who might not be in core->children yet */
2137 	if (core->new_child) {
2138 		tmp_clk = clk_propagate_rate_change(core->new_child, event);
2139 		if (tmp_clk)
2140 			fail_clk = tmp_clk;
2141 	}
2142 
2143 	return fail_clk;
2144 }
2145 
2146 /*
2147  * walk down a subtree and set the new rates notifying the rate
2148  * change on the way
2149  */
2150 static void clk_change_rate(struct clk_core *core)
2151 {
2152 	struct clk_core *child;
2153 	struct hlist_node *tmp;
2154 	unsigned long old_rate;
2155 	unsigned long best_parent_rate = 0;
2156 	bool skip_set_rate = false;
2157 	struct clk_core *old_parent;
2158 	struct clk_core *parent = NULL;
2159 
2160 	old_rate = core->rate;
2161 
2162 	if (core->new_parent) {
2163 		parent = core->new_parent;
2164 		best_parent_rate = core->new_parent->rate;
2165 	} else if (core->parent) {
2166 		parent = core->parent;
2167 		best_parent_rate = core->parent->rate;
2168 	}
2169 
2170 	if (clk_pm_runtime_get(core))
2171 		return;
2172 
2173 	if (core->flags & CLK_SET_RATE_UNGATE) {
2174 		clk_core_prepare(core);
2175 		clk_core_enable_lock(core);
2176 	}
2177 
2178 	if (core->new_parent && core->new_parent != core->parent) {
2179 		old_parent = __clk_set_parent_before(core, core->new_parent);
2180 		trace_clk_set_parent(core, core->new_parent);
2181 
2182 		if (core->ops->set_rate_and_parent) {
2183 			skip_set_rate = true;
2184 			core->ops->set_rate_and_parent(core->hw, core->new_rate,
2185 					best_parent_rate,
2186 					core->new_parent_index);
2187 		} else if (core->ops->set_parent) {
2188 			core->ops->set_parent(core->hw, core->new_parent_index);
2189 		}
2190 
2191 		trace_clk_set_parent_complete(core, core->new_parent);
2192 		__clk_set_parent_after(core, core->new_parent, old_parent);
2193 	}
2194 
2195 	if (core->flags & CLK_OPS_PARENT_ENABLE)
2196 		clk_core_prepare_enable(parent);
2197 
2198 	trace_clk_set_rate(core, core->new_rate);
2199 
2200 	if (!skip_set_rate && core->ops->set_rate)
2201 		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
2202 
2203 	trace_clk_set_rate_complete(core, core->new_rate);
2204 
2205 	core->rate = clk_recalc(core, best_parent_rate);
2206 
2207 	if (core->flags & CLK_SET_RATE_UNGATE) {
2208 		clk_core_disable_lock(core);
2209 		clk_core_unprepare(core);
2210 	}
2211 
2212 	if (core->flags & CLK_OPS_PARENT_ENABLE)
2213 		clk_core_disable_unprepare(parent);
2214 
2215 	if (core->notifier_count && old_rate != core->rate)
2216 		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
2217 
2218 	if (core->flags & CLK_RECALC_NEW_RATES)
2219 		(void)clk_calc_new_rates(core, core->new_rate);
2220 
2221 	if (core->ops->post_rate_change)
2222 		core->ops->post_rate_change(core->hw, old_rate, core->rate);
2223 
2224 	/*
2225 	 * Use safe iteration, as change_rate can actually swap parents
2226 	 * for certain clock types.
2227 	 */
2228 	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
2229 		/* Skip children who will be reparented to another clock */
2230 		if (child->new_parent && child->new_parent != core)
2231 			continue;
2232 		clk_change_rate(child);
2233 	}
2234 
2235 	/* handle the new child who might not be in core->children yet */
2236 	if (core->new_child)
2237 		clk_change_rate(core->new_child);
2238 
2239 	clk_pm_runtime_put(core);
2240 }
2241 
2242 static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
2243 						     unsigned long req_rate)
2244 {
2245 	int ret, cnt;
2246 	struct clk_rate_request req;
2247 
2248 	lockdep_assert_held(&prepare_lock);
2249 
2250 	if (!core)
2251 		return 0;
2252 
2253 	/* simulate what the rate would be if it could be freely set */
2254 	cnt = clk_core_rate_nuke_protect(core);
2255 	if (cnt < 0)
2256 		return cnt;
2257 
2258 	clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
2259 	req.rate = req_rate;
2260 
2261 	ret = clk_core_round_rate_nolock(core, &req);
2262 
2263 	/* restore the protection */
2264 	clk_core_rate_restore_protect(core, cnt);
2265 
2266 	return ret ? 0 : req.rate;
2267 }
2268 
2269 static int clk_core_set_rate_nolock(struct clk_core *core,
2270 				    unsigned long req_rate)
2271 {
2272 	struct clk_core *top, *fail_clk;
2273 	unsigned long rate;
2274 	int ret = 0;
2275 
2276 	if (!core)
2277 		return 0;
2278 
2279 	rate = clk_core_req_round_rate_nolock(core, req_rate);
2280 
2281 	/* bail early if nothing to do */
2282 	if (rate == clk_core_get_rate_nolock(core))
2283 		return 0;
2284 
2285 	/* fail on a direct rate set of a protected provider */
2286 	if (clk_core_rate_is_protected(core))
2287 		return -EBUSY;
2288 
2289 	/* calculate new rates and get the topmost changed clock */
2290 	top = clk_calc_new_rates(core, req_rate);
2291 	if (!top)
2292 		return -EINVAL;
2293 
2294 	ret = clk_pm_runtime_get(core);
2295 	if (ret)
2296 		return ret;
2297 
2298 	/* notify that we are about to change rates */
2299 	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
2300 	if (fail_clk) {
2301 		pr_debug("%s: failed to set %s rate\n", __func__,
2302 				fail_clk->name);
2303 		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
2304 		ret = -EBUSY;
2305 		goto err;
2306 	}
2307 
2308 	/* change the rates */
2309 	clk_change_rate(top);
2310 
2311 	core->req_rate = req_rate;
2312 err:
2313 	clk_pm_runtime_put(core);
2314 
2315 	return ret;
2316 }
2317 
2318 /**
2319  * clk_set_rate - specify a new rate for clk
2320  * @clk: the clk whose rate is being changed
2321  * @rate: the new rate for clk
2322  *
2323  * In the simplest case clk_set_rate will only adjust the rate of clk.
2324  *
2325  * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
2326  * propagate up to clk's parent; whether or not this happens depends on the
2327  * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
2328  * after calling .round_rate then upstream parent propagation is ignored.  If
2329  * *parent_rate comes back with a new rate for clk's parent then we propagate
2330  * up to clk's parent and set its rate.  Upward propagation will continue
2331  * until either a clk does not support the CLK_SET_RATE_PARENT flag or
2332  * .round_rate stops requesting changes to clk's parent_rate.
2333  *
2334  * Rate changes are accomplished via tree traversal that also recalculates the
2335  * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
2336  *
2337  * Returns 0 on success, a negative errno otherwise.
2338  */
2339 int clk_set_rate(struct clk *clk, unsigned long rate)
2340 {
2341 	int ret;
2342 
2343 	if (!clk)
2344 		return 0;
2345 
2346 	/* prevent racing with updates to the clock topology */
2347 	clk_prepare_lock();
2348 
2349 	if (clk->exclusive_count)
2350 		clk_core_rate_unprotect(clk->core);
2351 
2352 	ret = clk_core_set_rate_nolock(clk->core, rate);
2353 
2354 	if (clk->exclusive_count)
2355 		clk_core_rate_protect(clk->core);
2356 
2357 	clk_prepare_unlock();
2358 
2359 	return ret;
2360 }
2361 EXPORT_SYMBOL_GPL(clk_set_rate);
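
/*
 * Illustrative consumer usage (sketch; the 48 MHz request and the variable
 * names are hypothetical): request a new rate and read back what was actually
 * achieved, since the provider may round the request.
 *
 *	ret = clk_set_rate(baud_clk, 48000000);
 *	if (ret)
 *		return ret;
 *	actual = clk_get_rate(baud_clk);
 */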
2362 
2363 /**
2364  * clk_set_rate_exclusive - specify a new rate and get exclusive control
2365  * @clk: the clk whose rate is being changed
2366  * @rate: the new rate for clk
2367  *
2368  * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
2369  * within a critical section
2370  *
2371  * This can be used initially to ensure that at least 1 consumer is
2372  * satisfied when several consumers are competing for exclusivity over the
2373  * same clock provider.
2374  *
2375  * The exclusivity is not applied if setting the rate failed.
2376  *
2377  * Calls to clk_rate_exclusive_get() should be balanced with calls to
2378  * clk_rate_exclusive_put().
2379  *
2380  * Returns 0 on success, a negative errno otherwise.
2381  */
2382 int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
2383 {
2384 	int ret;
2385 
2386 	if (!clk)
2387 		return 0;
2388 
2389 	/* prevent racing with updates to the clock topology */
2390 	clk_prepare_lock();
2391 
2392 	/*
2393 	 * The temporary protection removal is deliberately omitted here.
2394 	 * This function is meant to be used instead of clk_rate_protect(),
2395 	 * i.e. before the consumer code path protects the clock provider.
2396 	 */
2397 
2398 	ret = clk_core_set_rate_nolock(clk->core, rate);
2399 	if (!ret) {
2400 		clk_core_rate_protect(clk->core);
2401 		clk->exclusive_count++;
2402 	}
2403 
2404 	clk_prepare_unlock();
2405 
2406 	return ret;
2407 }
2408 EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
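
/*
 * Illustrative pairing (sketch): a consumer that must not have its rate
 * disturbed by other consumers sets the rate exclusively and drops the
 * exclusivity with clk_rate_exclusive_put() when it no longer cares.
 *
 *	ret = clk_set_rate_exclusive(clk, 100000000);
 *	if (ret)
 *		return ret;
 *	...
 *	clk_rate_exclusive_put(clk);
 */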
2409 
2410 /**
2411  * clk_set_rate_range - set a rate range for a clock source
2412  * @clk: clock source
2413  * @min: desired minimum clock rate in Hz, inclusive
2414  * @max: desired maximum clock rate in Hz, inclusive
2415  *
2416  * Returns success (0) or negative errno.
2417  */
2418 int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
2419 {
2420 	int ret = 0;
2421 	unsigned long old_min, old_max, rate;
2422 
2423 	if (!clk)
2424 		return 0;
2425 
2426 	trace_clk_set_rate_range(clk->core, min, max);
2427 
2428 	if (min > max) {
2429 		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
2430 		       __func__, clk->core->name, clk->dev_id, clk->con_id,
2431 		       min, max);
2432 		return -EINVAL;
2433 	}
2434 
2435 	clk_prepare_lock();
2436 
2437 	if (clk->exclusive_count)
2438 		clk_core_rate_unprotect(clk->core);
2439 
2440 	/* Save the current values in case we need to rollback the change */
2441 	old_min = clk->min_rate;
2442 	old_max = clk->max_rate;
2443 	clk->min_rate = min;
2444 	clk->max_rate = max;
2445 
2446 	if (!clk_core_check_boundaries(clk->core, min, max)) {
2447 		ret = -EINVAL;
2448 		goto out;
2449 	}
2450 
2451 	rate = clk_core_get_rate_nolock(clk->core);
2452 	if (rate < min || rate > max) {
2453 		/*
2454 		 * FIXME:
2455 		 * We are in a bit of trouble here, the current rate is outside
2456 		 * the requested range. We are going to try to request an appropriate
2457 		 * range boundary but there is a catch. It may fail for the
2458 		 * usual reason (clock broken, clock protected, etc) but also
2459 		 * because:
2460 		 * - round_rate() was not favorable and fell on the wrong
2461 		 *   side of the boundary
2462 		 * - the determine_rate() callback does not really check for
2463 		 *   this corner case when determining the rate
2464 		 */
2465 
2466 		if (rate < min)
2467 			rate = min;
2468 		else
2469 			rate = max;
2470 
2471 		ret = clk_core_set_rate_nolock(clk->core, rate);
2472 		if (ret) {
2473 			/* rollback the changes */
2474 			clk->min_rate = old_min;
2475 			clk->max_rate = old_max;
2476 		}
2477 	}
2478 
2479 out:
2480 	if (clk->exclusive_count)
2481 		clk_core_rate_protect(clk->core);
2482 
2483 	clk_prepare_unlock();
2484 
2485 	return ret;
2486 }
2487 EXPORT_SYMBOL_GPL(clk_set_rate_range);
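
/*
 * Illustrative consumer usage (sketch; the bounds are hypothetical): express
 * a tolerable window instead of a single rate and let the framework keep the
 * clock within it, re-rating it now if it currently falls outside.
 *
 *	ret = clk_set_rate_range(clk, 100000000, 200000000);
 */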
2488 
2489 /**
2490  * clk_set_min_rate - set a minimum clock rate for a clock source
2491  * @clk: clock source
2492  * @rate: desired minimum clock rate in Hz, inclusive
2493  *
2494  * Returns success (0) or negative errno.
2495  */
2496 int clk_set_min_rate(struct clk *clk, unsigned long rate)
2497 {
2498 	if (!clk)
2499 		return 0;
2500 
2501 	trace_clk_set_min_rate(clk->core, rate);
2502 
2503 	return clk_set_rate_range(clk, rate, clk->max_rate);
2504 }
2505 EXPORT_SYMBOL_GPL(clk_set_min_rate);
2506 
2507 /**
2508  * clk_set_max_rate - set a maximum clock rate for a clock source
2509  * @clk: clock source
2510  * @rate: desired maximum clock rate in Hz, inclusive
2511  *
2512  * Returns success (0) or negative errno.
2513  */
2514 int clk_set_max_rate(struct clk *clk, unsigned long rate)
2515 {
2516 	if (!clk)
2517 		return 0;
2518 
2519 	trace_clk_set_max_rate(clk->core, rate);
2520 
2521 	return clk_set_rate_range(clk, clk->min_rate, rate);
2522 }
2523 EXPORT_SYMBOL_GPL(clk_set_max_rate);
2524 
2525 /**
2526  * clk_get_parent - return the parent of a clk
2527  * @clk: the clk whose parent gets returned
2528  *
2529  * Simply returns clk->parent.  Returns NULL if clk is NULL.
2530  */
2531 struct clk *clk_get_parent(struct clk *clk)
2532 {
2533 	struct clk *parent;
2534 
2535 	if (!clk)
2536 		return NULL;
2537 
2538 	clk_prepare_lock();
2539 	/* TODO: Create a per-user clk and change callers to call clk_put */
2540 	parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
2541 	clk_prepare_unlock();
2542 
2543 	return parent;
2544 }
2545 EXPORT_SYMBOL_GPL(clk_get_parent);
2546 
2547 static struct clk_core *__clk_init_parent(struct clk_core *core)
2548 {
2549 	u8 index = 0;
2550 
2551 	if (core->num_parents > 1 && core->ops->get_parent)
2552 		index = core->ops->get_parent(core->hw);
2553 
2554 	return clk_core_get_parent_by_index(core, index);
2555 }
2556 
2557 static void clk_core_reparent(struct clk_core *core,
2558 				  struct clk_core *new_parent)
2559 {
2560 	clk_reparent(core, new_parent);
2561 	__clk_recalc_accuracies(core);
2562 	__clk_recalc_rates(core, POST_RATE_CHANGE);
2563 }
2564 
2565 void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
2566 {
2567 	if (!hw)
2568 		return;
2569 
2570 	clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
2571 }
2572 
2573 /**
2574  * clk_has_parent - check if a clock is a possible parent for another
2575  * @clk: clock source
2576  * @parent: parent clock source
2577  *
2578  * This function can be used in drivers that need to check that a clock can be
2579  * the parent of another without actually changing the parent.
2580  *
2581  * Returns true if @parent is a possible parent for @clk, false otherwise.
2582  */
2583 bool clk_has_parent(struct clk *clk, struct clk *parent)
2584 {
2585 	struct clk_core *core, *parent_core;
2586 	int i;
2587 
2588 	/* NULL clocks should be nops, so return success if either is NULL. */
2589 	if (!clk || !parent)
2590 		return true;
2591 
2592 	core = clk->core;
2593 	parent_core = parent->core;
2594 
2595 	/* Optimize for the case where the parent is already the parent. */
2596 	if (core->parent == parent_core)
2597 		return true;
2598 
2599 	for (i = 0; i < core->num_parents; i++)
2600 		if (!strcmp(core->parents[i].name, parent_core->name))
2601 			return true;
2602 
2603 	return false;
2604 }
2605 EXPORT_SYMBOL_GPL(clk_has_parent);
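
/*
 * Illustrative usage (sketch; the clk names are hypothetical): validate a
 * candidate parent before actually switching to it with clk_set_parent().
 *
 *	if (clk_has_parent(mux_clk, pll_clk))
 *		ret = clk_set_parent(mux_clk, pll_clk);
 */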
2606 
2607 static int clk_core_set_parent_nolock(struct clk_core *core,
2608 				      struct clk_core *parent)
2609 {
2610 	int ret = 0;
2611 	int p_index = 0;
2612 	unsigned long p_rate = 0;
2613 
2614 	lockdep_assert_held(&prepare_lock);
2615 
2616 	if (!core)
2617 		return 0;
2618 
2619 	if (core->parent == parent)
2620 		return 0;
2621 
2622 	/* verify ops for multi-parent clks */
2623 	if (core->num_parents > 1 && !core->ops->set_parent)
2624 		return -EPERM;
2625 
2626 	/* check that we are allowed to re-parent if the clock is in use */
2627 	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
2628 		return -EBUSY;
2629 
2630 	if (clk_core_rate_is_protected(core))
2631 		return -EBUSY;
2632 
2633 	/* try finding the new parent index */
2634 	if (parent) {
2635 		p_index = clk_fetch_parent_index(core, parent);
2636 		if (p_index < 0) {
2637 			pr_debug("%s: clk %s can not be parent of clk %s\n",
2638 					__func__, parent->name, core->name);
2639 			return p_index;
2640 		}
2641 		p_rate = parent->rate;
2642 	}
2643 
2644 	ret = clk_pm_runtime_get(core);
2645 	if (ret)
2646 		return ret;
2647 
2648 	/* propagate PRE_RATE_CHANGE notifications */
2649 	ret = __clk_speculate_rates(core, p_rate);
2650 
2651 	/* abort if a driver objects */
2652 	if (ret & NOTIFY_STOP_MASK)
2653 		goto runtime_put;
2654 
2655 	/* do the re-parent */
2656 	ret = __clk_set_parent(core, parent, p_index);
2657 
2658 	/* propagate rate and accuracy recalculation accordingly */
2659 	if (ret) {
2660 		__clk_recalc_rates(core, ABORT_RATE_CHANGE);
2661 	} else {
2662 		__clk_recalc_rates(core, POST_RATE_CHANGE);
2663 		__clk_recalc_accuracies(core);
2664 	}
2665 
2666 runtime_put:
2667 	clk_pm_runtime_put(core);
2668 
2669 	return ret;
2670 }
2671 
2672 int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
2673 {
2674 	return clk_core_set_parent_nolock(hw->core, parent->core);
2675 }
2676 EXPORT_SYMBOL_GPL(clk_hw_set_parent);
2677 
2678 /**
2679  * clk_set_parent - switch the parent of a mux clk
2680  * @clk: the mux clk whose input we are switching
2681  * @parent: the new input to clk
2682  *
2683  * Re-parent clk to use parent as its new input source.  If clk is in
2684  * the prepared state, the clk will get enabled for the duration of this call.
2685  * If that's not acceptable for a specific clk (e.g. the consumer can't handle
2686  * that, or the reparenting is glitchy in hardware, etc), use the
2687  * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
2688  *
2689  * After successfully changing clk's parent clk_set_parent will update the
2690  * clk topology, sysfs topology and propagate rate recalculation via
2691  * __clk_recalc_rates.
2692  *
2693  * Returns 0 on success, a negative errno otherwise.
2694  */
2695 int clk_set_parent(struct clk *clk, struct clk *parent)
2696 {
2697 	int ret;
2698 
2699 	if (!clk)
2700 		return 0;
2701 
2702 	clk_prepare_lock();
2703 
2704 	if (clk->exclusive_count)
2705 		clk_core_rate_unprotect(clk->core);
2706 
2707 	ret = clk_core_set_parent_nolock(clk->core,
2708 					 parent ? parent->core : NULL);
2709 
2710 	if (clk->exclusive_count)
2711 		clk_core_rate_protect(clk->core);
2712 
2713 	clk_prepare_unlock();
2714 
2715 	return ret;
2716 }
2717 EXPORT_SYMBOL_GPL(clk_set_parent);
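
/*
 * Illustrative consumer usage (sketch; the clk names are hypothetical): when
 * the provider sets CLK_SET_PARENT_GATE, the clk must be unprepared before
 * its parent can be switched.
 *
 *	clk_disable_unprepare(mux_clk);
 *	ret = clk_set_parent(mux_clk, xtal_clk);
 *	if (!ret)
 *		ret = clk_prepare_enable(mux_clk);
 */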
2718 
2719 static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
2720 {
2721 	int ret = -EINVAL;
2722 
2723 	lockdep_assert_held(&prepare_lock);
2724 
2725 	if (!core)
2726 		return 0;
2727 
2728 	if (clk_core_rate_is_protected(core))
2729 		return -EBUSY;
2730 
2731 	trace_clk_set_phase(core, degrees);
2732 
2733 	if (core->ops->set_phase) {
2734 		ret = core->ops->set_phase(core->hw, degrees);
2735 		if (!ret)
2736 			core->phase = degrees;
2737 	}
2738 
2739 	trace_clk_set_phase_complete(core, degrees);
2740 
2741 	return ret;
2742 }
2743 
2744 /**
2745  * clk_set_phase - adjust the phase shift of a clock signal
2746  * @clk: clock signal source
2747  * @degrees: number of degrees the signal is shifted
2748  *
2749  * Shifts the phase of a clock signal by the specified
2750  * degrees. Returns 0 on success, a negative errno otherwise.
2751  *
2752  * This function makes no distinction about the input or reference
2753  * signal that we adjust the clock signal phase against. For example,
2754  * with phase-locked-loop clock signal generators we may shift phase with
2755  * respect to the feedback clock signal input, but for other cases the
2756  * clock phase may be shifted with respect to some other, unspecified
2757  * signal.
2758  *
2759  * Additionally the concept of phase shift does not propagate through
2760  * the clock tree hierarchy, which sets it apart from clock rates and
2761  * clock accuracy. A parent clock phase attribute does not have an
2762  * impact on the phase attribute of a child clock.
2763  */
2764 int clk_set_phase(struct clk *clk, int degrees)
2765 {
2766 	int ret;
2767 
2768 	if (!clk)
2769 		return 0;
2770 
2771 	/* sanity check degrees */
2772 	degrees %= 360;
2773 	if (degrees < 0)
2774 		degrees += 360;
2775 
2776 	clk_prepare_lock();
2777 
2778 	if (clk->exclusive_count)
2779 		clk_core_rate_unprotect(clk->core);
2780 
2781 	ret = clk_core_set_phase_nolock(clk->core, degrees);
2782 
2783 	if (clk->exclusive_count)
2784 		clk_core_rate_protect(clk->core);
2785 
2786 	clk_prepare_unlock();
2787 
2788 	return ret;
2789 }
2790 EXPORT_SYMBOL_GPL(clk_set_phase);
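
/*
 * Illustrative usage (sketch; the 90 degree shift is an example only): shift
 * a sampling clock by a quarter period and read the cached phase back.
 *
 *	ret = clk_set_phase(sample_clk, 90);
 *	if (!ret)
 *		dev_dbg(dev, "phase now %d deg\n", clk_get_phase(sample_clk));
 */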
2791 
2792 static int clk_core_get_phase(struct clk_core *core)
2793 {
2794 	int ret;
2795 
2796 	lockdep_assert_held(&prepare_lock);
2797 	if (!core->ops->get_phase)
2798 		return 0;
2799 
2800 	/* Always try to update cached phase if possible */
2801 	ret = core->ops->get_phase(core->hw);
2802 	if (ret >= 0)
2803 		core->phase = ret;
2804 
2805 	return ret;
2806 }
2807 
2808 /**
2809  * clk_get_phase - return the phase shift of a clock signal
2810  * @clk: clock signal source
2811  *
2812  * Returns the phase shift of a clock node in degrees, or a negative errno
2813  * on error.
2814  */
2815 int clk_get_phase(struct clk *clk)
2816 {
2817 	int ret;
2818 
2819 	if (!clk)
2820 		return 0;
2821 
2822 	clk_prepare_lock();
2823 	ret = clk_core_get_phase(clk->core);
2824 	clk_prepare_unlock();
2825 
2826 	return ret;
2827 }
2828 EXPORT_SYMBOL_GPL(clk_get_phase);
2829 
2830 static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
2831 {
2832 	/* Assume a default value of 50% */
2833 	core->duty.num = 1;
2834 	core->duty.den = 2;
2835 }
2836 
2837 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
2838 
2839 static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
2840 {
2841 	struct clk_duty *duty = &core->duty;
2842 	int ret = 0;
2843 
2844 	if (!core->ops->get_duty_cycle)
2845 		return clk_core_update_duty_cycle_parent_nolock(core);
2846 
2847 	ret = core->ops->get_duty_cycle(core->hw, duty);
2848 	if (ret)
2849 		goto reset;
2850 
2851 	/* Don't trust the clock provider too much */
2852 	if (duty->den == 0 || duty->num > duty->den) {
2853 		ret = -EINVAL;
2854 		goto reset;
2855 	}
2856 
2857 	return 0;
2858 
2859 reset:
2860 	clk_core_reset_duty_cycle_nolock(core);
2861 	return ret;
2862 }
2863 
2864 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
2865 {
2866 	int ret = 0;
2867 
2868 	if (core->parent &&
2869 	    core->flags & CLK_DUTY_CYCLE_PARENT) {
2870 		ret = clk_core_update_duty_cycle_nolock(core->parent);
2871 		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2872 	} else {
2873 		clk_core_reset_duty_cycle_nolock(core);
2874 	}
2875 
2876 	return ret;
2877 }
2878 
2879 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2880 						 struct clk_duty *duty);
2881 
2882 static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
2883 					  struct clk_duty *duty)
2884 {
2885 	int ret;
2886 
2887 	lockdep_assert_held(&prepare_lock);
2888 
2889 	if (clk_core_rate_is_protected(core))
2890 		return -EBUSY;
2891 
2892 	trace_clk_set_duty_cycle(core, duty);
2893 
2894 	if (!core->ops->set_duty_cycle)
2895 		return clk_core_set_duty_cycle_parent_nolock(core, duty);
2896 
2897 	ret = core->ops->set_duty_cycle(core->hw, duty);
2898 	if (!ret)
2899 		memcpy(&core->duty, duty, sizeof(*duty));
2900 
2901 	trace_clk_set_duty_cycle_complete(core, duty);
2902 
2903 	return ret;
2904 }
2905 
2906 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2907 						 struct clk_duty *duty)
2908 {
2909 	int ret = 0;
2910 
2911 	if (core->parent &&
2912 	    core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
2913 		ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
2914 		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2915 	}
2916 
2917 	return ret;
2918 }
2919 
2920 /**
2921  * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
2922  * @clk: clock signal source
2923  * @num: numerator of the duty cycle ratio to be applied
2924  * @den: denominator of the duty cycle ratio to be applied
2925  *
2926  * Apply the duty cycle ratio if the ratio is valid and the clock can
2927  * perform this operation.
2928  *
2929  * Returns (0) on success, a negative errno otherwise.
2930  */
2931 int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
2932 {
2933 	int ret;
2934 	struct clk_duty duty;
2935 
2936 	if (!clk)
2937 		return 0;
2938 
2939 	/* sanity check the ratio */
2940 	if (den == 0 || num > den)
2941 		return -EINVAL;
2942 
2943 	duty.num = num;
2944 	duty.den = den;
2945 
2946 	clk_prepare_lock();
2947 
2948 	if (clk->exclusive_count)
2949 		clk_core_rate_unprotect(clk->core);
2950 
2951 	ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
2952 
2953 	if (clk->exclusive_count)
2954 		clk_core_rate_protect(clk->core);
2955 
2956 	clk_prepare_unlock();
2957 
2958 	return ret;
2959 }
2960 EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
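
/*
 * Illustrative usage (sketch): request a 1/3 duty cycle; whether this can be
 * honoured depends on the provider's .set_duty_cycle implementation.
 *
 *	ret = clk_set_duty_cycle(pwm_clk, 1, 3);
 */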
2961 
2962 static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
2963 					  unsigned int scale)
2964 {
2965 	struct clk_duty *duty = &core->duty;
2966 	int ret;
2967 
2968 	clk_prepare_lock();
2969 
2970 	ret = clk_core_update_duty_cycle_nolock(core);
2971 	if (!ret)
2972 		ret = mult_frac(scale, duty->num, duty->den);
2973 
2974 	clk_prepare_unlock();
2975 
2976 	return ret;
2977 }
2978 
2979 /**
2980  * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
2981  * @clk: clock signal source
2982  * @scale: scaling factor to be applied to represent the ratio as an integer
2983  *
2984  * Returns the duty cycle ratio of a clock node multiplied by the provided
2985  * scaling factor, or negative errno on error.
2986  */
2987 int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
2988 {
2989 	if (!clk)
2990 		return 0;
2991 
2992 	return clk_core_get_scaled_duty_cycle(clk->core, scale);
2993 }
2994 EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
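
/*
 * Illustrative usage (sketch): with a scale of 100 the returned value is the
 * duty cycle in percent, e.g. a 1/2 ratio reads back as 50.
 *
 *	pct = clk_get_scaled_duty_cycle(clk, 100);
 */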
2995 
2996 /**
2997  * clk_is_match - check if two clk's point to the same hardware clock
2998  * @p: clk compared against q
2999  * @q: clk compared against p
3000  *
3001  * Returns true if the two struct clk pointers both point to the same hardware
3002  * clock node. Put differently, returns true if struct clk *p and struct clk *q
3003  * share the same struct clk_core object.
3004  *
3005  * Returns false otherwise. Note that two NULL clks are treated as matching.
3006  */
3007 bool clk_is_match(const struct clk *p, const struct clk *q)
3008 {
3009 	/* trivial case: identical struct clk's or both NULL */
3010 	if (p == q)
3011 		return true;
3012 
3013 	/* true if clk->core pointers match. Avoid dereferencing garbage */
3014 	if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
3015 		if (p->core == q->core)
3016 			return true;
3017 
3018 	return false;
3019 }
3020 EXPORT_SYMBOL_GPL(clk_is_match);
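
/*
 * Illustrative usage (sketch): two struct clk handles obtained separately,
 * e.g. by different devices, may still refer to the same hardware clock.
 *
 *	if (clk_is_match(clk_a, clk_b))
 *		dev_dbg(dev, "both handles share one clk_core\n");
 */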
3021 
3022 /***        debugfs support        ***/
3023 
3024 #ifdef CONFIG_DEBUG_FS
3025 #include <linux/debugfs.h>
3026 
3027 static struct dentry *rootdir;
3028 static int inited = 0;
3029 static DEFINE_MUTEX(clk_debug_lock);
3030 static HLIST_HEAD(clk_debug_list);
3031 
3032 static struct hlist_head *orphan_list[] = {
3033 	&clk_orphan_list,
3034 	NULL,
3035 };
3036 
3037 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
3038 				 int level)
3039 {
3040 	int phase;
3041 
3042 	seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
3043 		   level * 3 + 1, "",
3044 		   30 - level * 3, c->name,
3045 		   c->enable_count, c->prepare_count, c->protect_count,
3046 		   clk_core_get_rate_recalc(c),
3047 		   clk_core_get_accuracy_recalc(c));
3048 
3049 	phase = clk_core_get_phase(c);
3050 	if (phase >= 0)
3051 		seq_printf(s, "%5d", phase);
3052 	else
3053 		seq_puts(s, "-----");
3054 
3055 	seq_printf(s, " %6d", clk_core_get_scaled_duty_cycle(c, 100000));
3056 
3057 	if (c->ops->is_enabled)
3058 		seq_printf(s, " %9c\n", clk_core_is_enabled(c) ? 'Y' : 'N');
3059 	else if (!c->ops->enable)
3060 		seq_printf(s, " %9c\n", 'Y');
3061 	else
3062 		seq_printf(s, " %9c\n", '?');
3063 }
3064 
3065 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
3066 				     int level)
3067 {
3068 	struct clk_core *child;
3069 
3070 	clk_summary_show_one(s, c, level);
3071 
3072 	hlist_for_each_entry(child, &c->children, child_node)
3073 		clk_summary_show_subtree(s, child, level + 1);
3074 }
3075 
3076 static int clk_summary_show(struct seq_file *s, void *data)
3077 {
3078 	struct clk_core *c;
3079 	struct hlist_head **lists = (struct hlist_head **)s->private;
3080 
3081 	seq_puts(s, "                                 enable  prepare  protect                                duty  hardware\n");
3082 	seq_puts(s, "   clock                          count    count    count        rate   accuracy phase  cycle    enable\n");
3083 	seq_puts(s, "-------------------------------------------------------------------------------------------------------\n");
3084 
3085 	clk_prepare_lock();
3086 
3087 	for (; *lists; lists++)
3088 		hlist_for_each_entry(c, *lists, child_node)
3089 			clk_summary_show_subtree(s, c, 0);
3090 
3091 	clk_prepare_unlock();
3092 
3093 	return 0;
3094 }
3095 DEFINE_SHOW_ATTRIBUTE(clk_summary);
3096 
3097 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
3098 {
3099 	int phase;
3100 	unsigned long min_rate, max_rate;
3101 
3102 	clk_core_get_boundaries(c, &min_rate, &max_rate);
3103 
3104 	/* This should be JSON format, i.e. elements separated with a comma */
3105 	seq_printf(s, "\"%s\": { ", c->name);
3106 	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
3107 	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
3108 	seq_printf(s, "\"protect_count\": %d,", c->protect_count);
3109 	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c));
3110 	seq_printf(s, "\"min_rate\": %lu,", min_rate);
3111 	seq_printf(s, "\"max_rate\": %lu,", max_rate);
3112 	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c));
3113 	phase = clk_core_get_phase(c);
3114 	if (phase >= 0)
3115 		seq_printf(s, "\"phase\": %d,", phase);
3116 	seq_printf(s, "\"duty_cycle\": %u",
3117 		   clk_core_get_scaled_duty_cycle(c, 100000));
3118 }
3119 
3120 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
3121 {
3122 	struct clk_core *child;
3123 
3124 	clk_dump_one(s, c, level);
3125 
3126 	hlist_for_each_entry(child, &c->children, child_node) {
3127 		seq_putc(s, ',');
3128 		clk_dump_subtree(s, child, level + 1);
3129 	}
3130 
3131 	seq_putc(s, '}');
3132 }
3133 
3134 static int clk_dump_show(struct seq_file *s, void *data)
3135 {
3136 	struct clk_core *c;
3137 	bool first_node = true;
3138 	struct hlist_head **lists = (struct hlist_head **)s->private;
3139 
3140 	seq_putc(s, '{');
3141 	clk_prepare_lock();
3142 
3143 	for (; *lists; lists++) {
3144 		hlist_for_each_entry(c, *lists, child_node) {
3145 			if (!first_node)
3146 				seq_putc(s, ',');
3147 			first_node = false;
3148 			clk_dump_subtree(s, c, 0);
3149 		}
3150 	}
3151 
3152 	clk_prepare_unlock();
3153 
3154 	seq_puts(s, "}\n");
3155 	return 0;
3156 }
3157 DEFINE_SHOW_ATTRIBUTE(clk_dump);
3158 
3159 #define CLOCK_ALLOW_WRITE_DEBUGFS
3160 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3161 /*
3162  * This can be dangerous, therefore we don't provide any real compile-time
3163  * configuration option for this feature.
3164  * People who want to use this will need to modify the source code directly.
3165  */
3166 static int clk_rate_set(void *data, u64 val)
3167 {
3168 	struct clk_core *core = data;
3169 	int ret;
3170 
3171 	clk_prepare_lock();
3172 	ret = clk_core_set_rate_nolock(core, val);
3173 	clk_prepare_unlock();
3174 
3175 	return ret;
3176 }
3177 
3178 #define clk_rate_mode	0644
3179 
3180 static int clk_prepare_enable_set(void *data, u64 val)
3181 {
3182 	struct clk_core *core = data;
3183 	int ret = 0;
3184 
3185 	if (val)
3186 		ret = clk_prepare_enable(core->hw->clk);
3187 	else
3188 		clk_disable_unprepare(core->hw->clk);
3189 
3190 	return ret;
3191 }
3192 
3193 static int clk_prepare_enable_get(void *data, u64 *val)
3194 {
3195 	struct clk_core *core = data;
3196 
3197 	*val = core->enable_count && core->prepare_count;
3198 	return 0;
3199 }
3200 
3201 DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get,
3202 			 clk_prepare_enable_set, "%llu\n");
3203 
3204 #else
3205 #define clk_rate_set	NULL
3206 #define clk_rate_mode	0444
3207 #endif
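
/*
 * Illustrative usage once CLOCK_ALLOW_WRITE_DEBUGFS is defined above (a
 * sketch; it assumes debugfs is mounted at the usual /sys/kernel/debug
 * location):
 *
 *	# echo 24000000 > /sys/kernel/debug/clk/<clkname>/clk_rate
 *	# echo 1 > /sys/kernel/debug/clk/<clkname>/clk_prepare_enable
 */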
3208 
3209 static int clk_rate_get(void *data, u64 *val)
3210 {
3211 	struct clk_core *core = data;
3212 
3213 	*val = core->rate;
3214 	return 0;
3215 }
3216 
3217 DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");
3218 
3219 static const struct {
3220 	unsigned long flag;
3221 	const char *name;
3222 } clk_flags[] = {
3223 #define ENTRY(f) { f, #f }
3224 	ENTRY(CLK_SET_RATE_GATE),
3225 	ENTRY(CLK_SET_PARENT_GATE),
3226 	ENTRY(CLK_SET_RATE_PARENT),
3227 	ENTRY(CLK_IGNORE_UNUSED),
3228 	ENTRY(CLK_GET_RATE_NOCACHE),
3229 	ENTRY(CLK_SET_RATE_NO_REPARENT),
3230 	ENTRY(CLK_GET_ACCURACY_NOCACHE),
3231 	ENTRY(CLK_RECALC_NEW_RATES),
3232 	ENTRY(CLK_SET_RATE_UNGATE),
3233 	ENTRY(CLK_IS_CRITICAL),
3234 	ENTRY(CLK_OPS_PARENT_ENABLE),
3235 	ENTRY(CLK_DUTY_CYCLE_PARENT),
3236 #undef ENTRY
3237 };
3238 
3239 static int clk_flags_show(struct seq_file *s, void *data)
3240 {
3241 	struct clk_core *core = s->private;
3242 	unsigned long flags = core->flags;
3243 	unsigned int i;
3244 
3245 	for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
3246 		if (flags & clk_flags[i].flag) {
3247 			seq_printf(s, "%s\n", clk_flags[i].name);
3248 			flags &= ~clk_flags[i].flag;
3249 		}
3250 	}
3251 	if (flags) {
3252 		/* Unknown flags */
3253 		seq_printf(s, "0x%lx\n", flags);
3254 	}
3255 
3256 	return 0;
3257 }
3258 DEFINE_SHOW_ATTRIBUTE(clk_flags);
3259 
3260 static void possible_parent_show(struct seq_file *s, struct clk_core *core,
3261 				 unsigned int i, char terminator)
3262 {
3263 	struct clk_core *parent;
3264 	const char *name = NULL;
3265 
3266 	/*
3267 	 * Go through the following options to fetch a parent's name.
3268 	 *
3269 	 * 1. Fetch the registered parent clock and use its name
3270 	 * 2. Use the global (fallback) name if specified
3271 	 * 3. Use the local fw_name if provided
3272 	 * 4. Fetch parent clock's clock-output-name if DT index was set
3273 	 *
3274 	 * This may still fail in some cases, such as when the parent is
3275 	 * specified directly via a struct clk_hw pointer, but it isn't
3276 	 * registered (yet).
3277 	 */
3278 	parent = clk_core_get_parent_by_index(core, i);
3279 	if (parent) {
3280 		seq_puts(s, parent->name);
3281 	} else if (core->parents[i].name) {
3282 		seq_puts(s, core->parents[i].name);
3283 	} else if (core->parents[i].fw_name) {
3284 		seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
3285 	} else {
3286 		if (core->parents[i].index >= 0)
3287 			name = of_clk_get_parent_name(core->of_node, core->parents[i].index);
3288 		if (!name)
3289 			name = "(missing)";
3290 
3291 		seq_puts(s, name);
3292 	}
3293 
3294 	seq_putc(s, terminator);
3295 }
3296 
3297 static int possible_parents_show(struct seq_file *s, void *data)
3298 {
3299 	struct clk_core *core = s->private;
3300 	int i;
3301 
3302 	for (i = 0; i < core->num_parents - 1; i++)
3303 		possible_parent_show(s, core, i, ' ');
3304 
3305 	possible_parent_show(s, core, i, '\n');
3306 
3307 	return 0;
3308 }
3309 DEFINE_SHOW_ATTRIBUTE(possible_parents);
3310 
3311 static int current_parent_show(struct seq_file *s, void *data)
3312 {
3313 	struct clk_core *core = s->private;
3314 
3315 	if (core->parent)
3316 		seq_printf(s, "%s\n", core->parent->name);
3317 
3318 	return 0;
3319 }
3320 DEFINE_SHOW_ATTRIBUTE(current_parent);
3321 
3322 static int clk_duty_cycle_show(struct seq_file *s, void *data)
3323 {
3324 	struct clk_core *core = s->private;
3325 	struct clk_duty *duty = &core->duty;
3326 
3327 	seq_printf(s, "%u/%u\n", duty->num, duty->den);
3328 
3329 	return 0;
3330 }
3331 DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
3332 
3333 static int clk_min_rate_show(struct seq_file *s, void *data)
3334 {
3335 	struct clk_core *core = s->private;
3336 	unsigned long min_rate, max_rate;
3337 
3338 	clk_prepare_lock();
3339 	clk_core_get_boundaries(core, &min_rate, &max_rate);
3340 	clk_prepare_unlock();
3341 	seq_printf(s, "%lu\n", min_rate);
3342 
3343 	return 0;
3344 }
3345 DEFINE_SHOW_ATTRIBUTE(clk_min_rate);
3346 
3347 static int clk_max_rate_show(struct seq_file *s, void *data)
3348 {
3349 	struct clk_core *core = s->private;
3350 	unsigned long min_rate, max_rate;
3351 
3352 	clk_prepare_lock();
3353 	clk_core_get_boundaries(core, &min_rate, &max_rate);
3354 	clk_prepare_unlock();
3355 	seq_printf(s, "%lu\n", max_rate);
3356 
3357 	return 0;
3358 }
3359 DEFINE_SHOW_ATTRIBUTE(clk_max_rate);
3360 
3361 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
3362 {
3363 	struct dentry *root;
3364 
3365 	if (!core || !pdentry)
3366 		return;
3367 
3368 	root = debugfs_create_dir(core->name, pdentry);
3369 	core->dentry = root;
3370 
3371 	debugfs_create_file("clk_rate", clk_rate_mode, root, core,
3372 			    &clk_rate_fops);
3373 	debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
3374 	debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
3375 	debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
3376 	debugfs_create_u32("clk_phase", 0444, root, &core->phase);
3377 	debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
3378 	debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
3379 	debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
3380 	debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
3381 	debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
3382 	debugfs_create_file("clk_duty_cycle", 0444, root, core,
3383 			    &clk_duty_cycle_fops);
3384 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3385 	debugfs_create_file("clk_prepare_enable", 0644, root, core,
3386 			    &clk_prepare_enable_fops);
3387 #endif
3388 
3389 	if (core->num_parents > 0)
3390 		debugfs_create_file("clk_parent", 0444, root, core,
3391 				    &current_parent_fops);
3392 
3393 	if (core->num_parents > 1)
3394 		debugfs_create_file("clk_possible_parents", 0444, root, core,
3395 				    &possible_parents_fops);
3396 
3397 	if (core->ops->debug_init)
3398 		core->ops->debug_init(core->hw, core->dentry);
3399 }
3400 
3401 /**
3402  * clk_debug_register - add a clk node to the debugfs clk directory
3403  * @core: the clk being added to the debugfs clk directory
3404  *
3405  * Dynamically adds a clk to the debugfs clk directory if debugfs has been
3406  * initialized.  Otherwise it bails out early since the debugfs clk directory
3407  * will be created lazily by clk_debug_init as part of a late_initcall.
3408  */
3409 static void clk_debug_register(struct clk_core *core)
3410 {
3411 	mutex_lock(&clk_debug_lock);
3412 	hlist_add_head(&core->debug_node, &clk_debug_list);
3413 	if (inited)
3414 		clk_debug_create_one(core, rootdir);
3415 	mutex_unlock(&clk_debug_lock);
3416 }
3417 
3418 /**
3419  * clk_debug_unregister - remove a clk node from the debugfs clk directory
3420  * @core: the clk being removed from the debugfs clk directory
3421  *
3422  * Dynamically removes a clk and all its child nodes from the
3423  * debugfs clk directory if clk->dentry points to debugfs created by
3424  * clk_debug_register in __clk_core_init.
3425  */
3426 static void clk_debug_unregister(struct clk_core *core)
3427 {
3428 	mutex_lock(&clk_debug_lock);
3429 	hlist_del_init(&core->debug_node);
3430 	debugfs_remove_recursive(core->dentry);
3431 	core->dentry = NULL;
3432 	mutex_unlock(&clk_debug_lock);
3433 }
3434 
3435 /**
3436  * clk_debug_init - lazily populate the debugfs clk directory
3437  *
3438  * clks are often initialized very early during boot before memory can be
3439  * dynamically allocated and well before debugfs is set up. This function
3440  * populates the debugfs clk directory once at boot-time when we know that
3441  * debugfs is set up. It should only be called once at boot-time; all other clks
3442  * added dynamically will be registered with clk_debug_register.
3443  */
3444 static int __init clk_debug_init(void)
3445 {
3446 	struct clk_core *core;
3447 
3448 	rootdir = debugfs_create_dir("clk", NULL);
3449 
3450 	debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
3451 			    &clk_summary_fops);
3452 	debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
3453 			    &clk_dump_fops);
3454 	debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
3455 			    &clk_summary_fops);
3456 	debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
3457 			    &clk_dump_fops);
3458 
3459 	mutex_lock(&clk_debug_lock);
3460 	hlist_for_each_entry(core, &clk_debug_list, debug_node)
3461 		clk_debug_create_one(core, rootdir);
3462 
3463 	inited = 1;
3464 	mutex_unlock(&clk_debug_lock);
3465 
3466 	return 0;
3467 }
3468 late_initcall(clk_debug_init);
3469 #else
3470 static inline void clk_debug_register(struct clk_core *core) { }
3471 static inline void clk_debug_unregister(struct clk_core *core)
3472 {
3473 }
3474 #endif
3475 
3476 static void clk_core_reparent_orphans_nolock(void)
3477 {
3478 	struct clk_core *orphan;
3479 	struct hlist_node *tmp2;
3480 
3481 	/*
3482 	 * Walk the list of orphan clocks and reparent any that have newly found
3483 	 * a parent.
3484 	 */
3485 	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
3486 		struct clk_core *parent = __clk_init_parent(orphan);
3487 
3488 		/*
3489 		 * We need to use __clk_set_parent_before() and _after() to
3490 		 * properly migrate any prepare/enable count of the orphan
3491 		 * clock. This is important for CLK_IS_CRITICAL clocks, which
3492 		 * are enabled during init but might not have a parent yet.
3493 		 */
3494 		if (parent) {
3495 			/* update the clk tree topology */
3496 			__clk_set_parent_before(orphan, parent);
3497 			__clk_set_parent_after(orphan, parent, NULL);
3498 			__clk_recalc_accuracies(orphan);
3499 			__clk_recalc_rates(orphan, 0);
3500 			__clk_core_update_orphan_hold_state(orphan);
3501 
3502 			/*
3503 			 * __clk_init_parent() will set the initial req_rate to
3504 			 * 0 if the clock doesn't have clk_ops::recalc_rate and
3505 			 * is an orphan when it's registered.
3506 			 *
3507 			 * 'req_rate' is used by clk_set_rate_range() and
3508 			 * clk_put() to trigger a clk_set_rate() call whenever
3509 			 * the boundaries are modified. Let's make sure
3510 			 * 'req_rate' is set to something non-zero so that
3511 			 * clk_set_rate_range() doesn't drop the frequency.
3512 			 */
3513 			orphan->req_rate = orphan->rate;
3514 		}
3515 	}
3516 }
3517 
3518 /**
3519  * __clk_core_init - initialize the data structures in a struct clk_core
3520  * @core:	clk_core being initialized
3521  *
3522  * Initializes the lists in struct clk_core, queries the hardware for the
3523  * parent and rate and sets them both.
3524  */
3525 static int __clk_core_init(struct clk_core *core)
3526 {
3527 	int ret;
3528 	struct clk_core *parent;
3529 	unsigned long rate;
3530 	int phase;
3531 
3532 	if (!core)
3533 		return -EINVAL;
3534 
3535 	clk_prepare_lock();
3536 
3537 	/*
3538 	 * Set hw->core after grabbing the prepare_lock to synchronize with
3539 	 * callers of clk_core_fill_parent_index() where we treat hw->core
3540 	 * being NULL as the clk not being registered yet. This is crucial so
3541 	 * that clks aren't parented until their parent is fully registered.
3542 	 */
3543 	core->hw->core = core;
3544 
3545 	ret = clk_pm_runtime_get(core);
3546 	if (ret)
3547 		goto unlock;
3548 
3549 	/* check to see if a clock with this name is already registered */
3550 	if (clk_core_lookup(core->name)) {
3551 		pr_debug("%s: clk %s already initialized\n",
3552 				__func__, core->name);
3553 		ret = -EEXIST;
3554 		goto out;
3555 	}
3556 
3557 	/* check that clk_ops are sane.  See Documentation/driver-api/clk.rst */
3558 	if (core->ops->set_rate &&
3559 	    !((core->ops->round_rate || core->ops->determine_rate) &&
3560 	      core->ops->recalc_rate)) {
3561 		pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
3562 		       __func__, core->name);
3563 		ret = -EINVAL;
3564 		goto out;
3565 	}
3566 
3567 	if (core->ops->set_parent && !core->ops->get_parent) {
3568 		pr_err("%s: %s must implement .get_parent & .set_parent\n",
3569 		       __func__, core->name);
3570 		ret = -EINVAL;
3571 		goto out;
3572 	}
3573 
3574 	if (core->num_parents > 1 && !core->ops->get_parent) {
3575 		pr_err("%s: %s must implement .get_parent as it has multi parents\n",
3576 		       __func__, core->name);
3577 		ret = -EINVAL;
3578 		goto out;
3579 	}
3580 
3581 	if (core->ops->set_rate_and_parent &&
3582 			!(core->ops->set_parent && core->ops->set_rate)) {
3583 		pr_err("%s: %s must implement .set_parent & .set_rate\n",
3584 				__func__, core->name);
3585 		ret = -EINVAL;
3586 		goto out;
3587 	}
3588 
3589 	/*
3590 	 * optional platform-specific magic
3591 	 *
3592 	 * The .init callback is not used by any of the basic clock types, but
3593 	 * exists for weird hardware that must perform initialization magic for
3594 	 * CCF to get an accurate view of the clock for any other callbacks. It
3595 	 * may also be used when the clock needs to perform dynamic allocations.
3596 	 * Such allocations must be freed in the terminate() callback.
3597 	 * This callback shall not be used to initialize parameter state,
3598 	 * such as rate, parent, etc.
3599 	 *
3600 	 * If it exists, this callback is called before any other callback of
3601 	 * the clock.
3602 	 */
3603 	if (core->ops->init) {
3604 		ret = core->ops->init(core->hw);
3605 		if (ret)
3606 			goto out;
3607 	}
3608 
3609 	parent = core->parent = __clk_init_parent(core);
3610 
3611 	/*
3612 	 * Populate core->parent if parent has already been clk_core_init'd. If
3613 	 * parent has not yet been clk_core_init'd then place clk in the orphan
3614 	 * list.  If clk doesn't have any parents then place it in the root
3615 	 * clk list.
3616 	 *
3617 	 * Every time a new clk is clk_init'd then we walk the list of orphan
3618 	 * clocks and re-parent any that are children of the clock currently
3619 	 * being clk_init'd.
3620 	 */
3621 	if (parent) {
3622 		hlist_add_head(&core->child_node, &parent->children);
3623 		core->orphan = parent->orphan;
3624 	} else if (!core->num_parents) {
3625 		hlist_add_head(&core->child_node, &clk_root_list);
3626 		core->orphan = false;
3627 	} else {
3628 		hlist_add_head(&core->child_node, &clk_orphan_list);
3629 		core->orphan = true;
3630 	}
3631 
3632 	/*
3633 	 * Set clk's accuracy.  The preferred method is to use
3634 	 * .recalc_accuracy. For simple clocks and lazy developers the default
3635 	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
3636 	 * parent (or is orphaned) then accuracy is set to zero (perfect
3637 	 * clock).
3638 	 */
3639 	if (core->ops->recalc_accuracy)
3640 		core->accuracy = core->ops->recalc_accuracy(core->hw,
3641 					clk_core_get_accuracy_no_lock(parent));
3642 	else if (parent)
3643 		core->accuracy = parent->accuracy;
3644 	else
3645 		core->accuracy = 0;
3646 
3647 	/*
3648 	 * Set clk's phase by clk_core_get_phase() caching the phase.
3649 	 * Since a phase is by definition relative to its parent, just
3650 	 * query the current clock phase, or just assume it's in phase.
3651 	 */
3652 	phase = clk_core_get_phase(core);
3653 	if (phase < 0) {
3654 		ret = phase;
3655 		pr_warn("%s: Failed to get phase for clk '%s'\n", __func__,
3656 			core->name);
3657 		goto out;
3658 	}
3659 
3660 	/*
3661 	 * Set clk's duty cycle.
3662 	 */
3663 	clk_core_update_duty_cycle_nolock(core);
3664 
3665 	/*
3666 	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
3667 	 * simple clocks and lazy developers the default fallback is to use the
3668 	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
3669 	 * then rate is set to zero.
3670 	 */
3671 	if (core->ops->recalc_rate)
3672 		rate = core->ops->recalc_rate(core->hw,
3673 				clk_core_get_rate_nolock(parent));
3674 	else if (parent)
3675 		rate = parent->rate;
3676 	else
3677 		rate = 0;
3678 	core->rate = core->req_rate = rate;
3679 
3680 	core->boot_enabled = clk_core_is_enabled(core);
3681 
3682 	/*
3683 	 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
3684 	 * don't get accidentally disabled when walking the orphan tree and
3685 	 * reparenting clocks
3686 	 */
3687 	if (core->flags & CLK_IS_CRITICAL) {
3688 		ret = clk_core_prepare(core);
3689 		if (ret) {
3690 			pr_warn("%s: critical clk '%s' failed to prepare\n",
3691 			       __func__, core->name);
3692 			goto out;
3693 		}
3694 
3695 		ret = clk_core_enable_lock(core);
3696 		if (ret) {
3697 			pr_warn("%s: critical clk '%s' failed to enable\n",
3698 			       __func__, core->name);
3699 			clk_core_unprepare(core);
3700 			goto out;
3701 		}
3702 	}
3703 
3704 	clk_core_hold_state(core);
3705 	clk_core_reparent_orphans_nolock();
3706 
3707 
3708 	kref_init(&core->ref);
3709 out:
3710 	clk_pm_runtime_put(core);
3711 unlock:
3712 	if (ret) {
3713 		hlist_del_init(&core->child_node);
3714 		core->hw->core = NULL;
3715 	}
3716 
3717 	clk_prepare_unlock();
3718 
3719 	if (!ret)
3720 		clk_debug_register(core);
3721 
3722 	return ret;
3723 }
3724 
3725 /**
3726  * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
3727  * @core: clk to add consumer to
3728  * @clk: consumer to link to a clk
3729  */
3730 static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
3731 {
3732 	clk_prepare_lock();
3733 	hlist_add_head(&clk->clks_node, &core->clks);
3734 	clk_prepare_unlock();
3735 }
3736 
3737 /**
3738  * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
3739  * @clk: consumer to unlink
3740  */
3741 static void clk_core_unlink_consumer(struct clk *clk)
3742 {
3743 	lockdep_assert_held(&prepare_lock);
3744 	hlist_del(&clk->clks_node);
3745 }
3746 
3747 /**
3748  * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core
3749  * @core: clk to allocate a consumer for
3750  * @dev_id: string describing device name
3751  * @con_id: connection ID string on device
3752  *
3753  * Returns: clk consumer left unlinked from the consumer list
3754  */
3755 static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
3756 			     const char *con_id)
3757 {
3758 	struct clk *clk;
3759 
3760 	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
3761 	if (!clk)
3762 		return ERR_PTR(-ENOMEM);
3763 
3764 	clk->core = core;
3765 	clk->dev_id = dev_id;
3766 	clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
3767 	clk->max_rate = ULONG_MAX;
3768 
3769 	return clk;
3770 }
3771 
3772 /**
3773  * free_clk - Free a clk consumer
3774  * @clk: clk consumer to free
3775  *
3776  * Note, this assumes the clk has been unlinked from the clk_core consumer
3777  * list.
3778  */
3779 static void free_clk(struct clk *clk)
3780 {
3781 	kfree_const(clk->con_id);
3782 	kfree(clk);
3783 }
3784 
3785 /**
3786  * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given
3787  * a clk_hw
3788  * @dev: clk consumer device
3789  * @hw: clk_hw associated with the clk being consumed
3790  * @dev_id: string describing device name
3791  * @con_id: connection ID string on device
3792  *
3793  * This is the main function used to create a clk pointer for use by clk
3794  * consumers. It connects a consumer to the clk_core and clk_hw structures
3795  * used by the framework and clk provider respectively.
3796  */
3797 struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
3798 			      const char *dev_id, const char *con_id)
3799 {
3800 	struct clk *clk;
3801 	struct clk_core *core;
3802 
3803 	/* This is to allow this function to be chained to others */
3804 	if (IS_ERR_OR_NULL(hw))
3805 		return ERR_CAST(hw);
3806 
3807 	core = hw->core;
3808 	clk = alloc_clk(core, dev_id, con_id);
3809 	if (IS_ERR(clk))
3810 		return clk;
3811 	clk->dev = dev;
3812 
3813 	if (!try_module_get(core->owner)) {
3814 		free_clk(clk);
3815 		return ERR_PTR(-ENOENT);
3816 	}
3817 
3818 	kref_get(&core->ref);
3819 	clk_core_link_consumer(core, clk);
3820 
3821 	return clk;
3822 }
3823 
3824 /**
3825  * clk_hw_get_clk - get clk consumer given a clk_hw
3826  * @hw: clk_hw associated with the clk being consumed
3827  * @con_id: connection ID string on device
3828  *
3829  * Returns: new clk consumer
3830  * This is the function to be used by providers which need
3831  * to get a consumer clk and act on the clock element.
3832  * Calls to this function must be balanced with calls to clk_put().
3833  */
3834 struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id)
3835 {
3836 	struct device *dev = hw->core->dev;
3837 	const char *name = dev ? dev_name(dev) : NULL;
3838 
3839 	return clk_hw_create_clk(dev, hw, name, con_id);
3840 }
3841 EXPORT_SYMBOL(clk_hw_get_clk);
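
/*
 * Illustrative sketch (not from this file): a provider that wants to act on
 * one of its own clocks through the consumer API can wrap its clk_hw with
 * clk_hw_get_clk() and must balance the call with clk_put(). "my_hw" and the
 * 1 MHz rate below are hypothetical.
 *
 *	struct clk *clk = clk_hw_get_clk(&my_hw, NULL);
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	clk_set_rate(clk, 1000000);
 *	clk_put(clk);
 */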
3842 
3843 static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
3844 {
3845 	const char *dst;
3846 
3847 	if (!src) {
3848 		if (must_exist)
3849 			return -EINVAL;
3850 		return 0;
3851 	}
3852 
3853 	*dst_p = dst = kstrdup_const(src, GFP_KERNEL);
3854 	if (!dst)
3855 		return -ENOMEM;
3856 
3857 	return 0;
3858 }
3859 
3860 static int clk_core_populate_parent_map(struct clk_core *core,
3861 					const struct clk_init_data *init)
3862 {
3863 	u8 num_parents = init->num_parents;
3864 	const char * const *parent_names = init->parent_names;
3865 	const struct clk_hw **parent_hws = init->parent_hws;
3866 	const struct clk_parent_data *parent_data = init->parent_data;
3867 	int i, ret = 0;
3868 	struct clk_parent_map *parents, *parent;
3869 
3870 	if (!num_parents)
3871 		return 0;
3872 
3873 	/*
3874 	 * Avoid unnecessary string look-ups of clk_core's possible parents by
3875 	 * having a cache of names/clk_hw pointers to clk_core pointers.
3876 	 */
3877 	parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
3878 	core->parents = parents;
3879 	if (!parents)
3880 		return -ENOMEM;
3881 
3882 	/* Copy everything over because it might be __initdata */
3883 	for (i = 0, parent = parents; i < num_parents; i++, parent++) {
3884 		parent->index = -1;
3885 		if (parent_names) {
3886 			/* throw a WARN if any entries are NULL */
3887 			WARN(!parent_names[i],
3888 				"%s: invalid NULL in %s's .parent_names\n",
3889 				__func__, core->name);
3890 			ret = clk_cpy_name(&parent->name, parent_names[i],
3891 					   true);
3892 		} else if (parent_data) {
3893 			parent->hw = parent_data[i].hw;
3894 			parent->index = parent_data[i].index;
3895 			ret = clk_cpy_name(&parent->fw_name,
3896 					   parent_data[i].fw_name, false);
3897 			if (!ret)
3898 				ret = clk_cpy_name(&parent->name,
3899 						   parent_data[i].name,
3900 						   false);
3901 		} else if (parent_hws) {
3902 			parent->hw = parent_hws[i];
3903 		} else {
3904 			ret = -EINVAL;
3905 			WARN(1, "Must specify parents if num_parents > 0\n");
3906 		}
3907 
3908 		if (ret) {
3909 			do {
3910 				kfree_const(parents[i].name);
3911 				kfree_const(parents[i].fw_name);
3912 			} while (--i >= 0);
3913 			kfree(parents);
3914 
3915 			return ret;
3916 		}
3917 	}
3918 
3919 	return 0;
3920 }
3921 
3922 static void clk_core_free_parent_map(struct clk_core *core)
3923 {
3924 	int i = core->num_parents;
3925 
3926 	if (!core->num_parents)
3927 		return;
3928 
3929 	while (--i >= 0) {
3930 		kfree_const(core->parents[i].name);
3931 		kfree_const(core->parents[i].fw_name);
3932 	}
3933 
3934 	kfree(core->parents);
3935 }
3936 
3937 static struct clk *
3938 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
3939 {
3940 	int ret;
3941 	struct clk_core *core;
3942 	const struct clk_init_data *init = hw->init;
3943 
3944 	/*
3945 	 * The init data is not supposed to be used outside of the registration path.
3946 	 * Set it to NULL so that provider drivers can't use it either and so that
3947 	 * we catch use of hw->init early on in the core.
3948 	 */
3949 	hw->init = NULL;
3950 
3951 	core = kzalloc(sizeof(*core), GFP_KERNEL);
3952 	if (!core) {
3953 		ret = -ENOMEM;
3954 		goto fail_out;
3955 	}
3956 
3957 	core->name = kstrdup_const(init->name, GFP_KERNEL);
3958 	if (!core->name) {
3959 		ret = -ENOMEM;
3960 		goto fail_name;
3961 	}
3962 
3963 	if (WARN_ON(!init->ops)) {
3964 		ret = -EINVAL;
3965 		goto fail_ops;
3966 	}
3967 	core->ops = init->ops;
3968 
3969 	if (dev && pm_runtime_enabled(dev))
3970 		core->rpm_enabled = true;
3971 	core->dev = dev;
3972 	core->of_node = np;
3973 	if (dev && dev->driver)
3974 		core->owner = dev->driver->owner;
3975 	core->hw = hw;
3976 	core->flags = init->flags;
3977 	core->num_parents = init->num_parents;
3978 	core->min_rate = 0;
3979 	core->max_rate = ULONG_MAX;
3980 
3981 	ret = clk_core_populate_parent_map(core, init);
3982 	if (ret)
3983 		goto fail_parents;
3984 
3985 	INIT_HLIST_HEAD(&core->clks);
3986 
3987 	/*
3988 	 * Don't call clk_hw_create_clk() here because that would pin the
3989 	 * provider module to itself and prevent it from ever being removed.
3990 	 */
3991 	hw->clk = alloc_clk(core, NULL, NULL);
3992 	if (IS_ERR(hw->clk)) {
3993 		ret = PTR_ERR(hw->clk);
3994 		goto fail_create_clk;
3995 	}
3996 
3997 	clk_core_link_consumer(core, hw->clk);
3998 
3999 	ret = __clk_core_init(core);
4000 	if (!ret)
4001 		return hw->clk;
4002 
4003 	clk_prepare_lock();
4004 	clk_core_unlink_consumer(hw->clk);
4005 	clk_prepare_unlock();
4006 
4007 	free_clk(hw->clk);
4008 	hw->clk = NULL;
4009 
4010 fail_create_clk:
4011 	clk_core_free_parent_map(core);
4012 fail_parents:
4013 fail_ops:
4014 	kfree_const(core->name);
4015 fail_name:
4016 	kfree(core);
4017 fail_out:
4018 	return ERR_PTR(ret);
4019 }
4020 
4021 /**
4022  * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
4023  * @dev: Device to get device node of
4024  *
4025  * Return: device node pointer of @dev, or the device node pointer of
4026  * @dev->parent if @dev doesn't have a device node, or NULL if neither
4027  * @dev nor @dev->parent has a device node.
4028  */
4029 static struct device_node *dev_or_parent_of_node(struct device *dev)
4030 {
4031 	struct device_node *np;
4032 
4033 	if (!dev)
4034 		return NULL;
4035 
4036 	np = dev_of_node(dev);
4037 	if (!np)
4038 		np = dev_of_node(dev->parent);
4039 
4040 	return np;
4041 }
4042 
4043 /**
4044  * clk_register - allocate a new clock, register it and return an opaque cookie
4045  * @dev: device that is registering this clock
4046  * @hw: link to hardware-specific clock data
4047  *
4048  * clk_register is the *deprecated* interface for populating the clock tree with
4049  * new clock nodes. Use clk_hw_register() instead.
4050  *
4051  * Returns: a pointer to the newly allocated struct clk which
4052  * cannot be dereferenced by driver code but may be used in conjunction with the
4053  * rest of the clock API.  In the event of an error clk_register will return an
4054  * error code; drivers must test for an error code after calling clk_register.
4055  */
4056 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
4057 {
4058 	return __clk_register(dev, dev_or_parent_of_node(dev), hw);
4059 }
4060 EXPORT_SYMBOL_GPL(clk_register);
4061 
4062 /**
4063  * clk_hw_register - register a clk_hw and return an error code
4064  * @dev: device that is registering this clock
4065  * @hw: link to hardware-specific clock data
4066  *
4067  * clk_hw_register is the primary interface for populating the clock tree with
4068  * new clock nodes. It returns an integer equal to zero indicating success or
4069  * less than zero indicating failure. Drivers must test for an error code after
4070  * calling clk_hw_register().
4071  */
4072 int clk_hw_register(struct device *dev, struct clk_hw *hw)
4073 {
4074 	return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
4075 			       hw));
4076 }
4077 EXPORT_SYMBOL_GPL(clk_hw_register);
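
/*
 * Illustrative sketch (hypothetical driver, not from this file): a typical
 * caller checks the return code in probe and pairs the registration with
 * clk_hw_unregister() in its remove path; "foo" is an assumed driver struct.
 *
 *	ret = clk_hw_register(&pdev->dev, &foo->hw);
 *	if (ret)
 *		return ret;
 *	...
 *	clk_hw_unregister(&foo->hw);	// in remove()
 */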
4078 
4079 /*
4080  * of_clk_hw_register - register a clk_hw and return an error code
4081  * @node: device_node of device that is registering this clock
4082  * @hw: link to hardware-specific clock data
4083  *
4084  * of_clk_hw_register() is the primary interface for populating the clock tree
4085  * with new clock nodes when a struct device is not available, but a struct
4086  * device_node is. It returns an integer equal to zero indicating success or
4087  * less than zero indicating failure. Drivers must test for an error code after
4088  * calling of_clk_hw_register().
4089  */
4090 int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
4091 {
4092 	return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
4093 }
4094 EXPORT_SYMBOL_GPL(of_clk_hw_register);
4095 
4096 /* Free memory allocated for a clock. */
4097 static void __clk_release(struct kref *ref)
4098 {
4099 	struct clk_core *core = container_of(ref, struct clk_core, ref);
4100 
4101 	lockdep_assert_held(&prepare_lock);
4102 
4103 	clk_core_free_parent_map(core);
4104 	kfree_const(core->name);
4105 	kfree(core);
4106 }
4107 
4108 /*
4109  * Empty clk_ops for unregistered clocks. These are used temporarily
4110  * after clk_unregister() was called on a clock and until the last clock
4111  * consumer calls clk_put() and the struct clk object is freed.
4112  */
4113 static int clk_nodrv_prepare_enable(struct clk_hw *hw)
4114 {
4115 	return -ENXIO;
4116 }
4117 
4118 static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
4119 {
4120 	WARN_ON_ONCE(1);
4121 }
4122 
4123 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
4124 					unsigned long parent_rate)
4125 {
4126 	return -ENXIO;
4127 }
4128 
4129 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
4130 {
4131 	return -ENXIO;
4132 }
4133 
4134 static const struct clk_ops clk_nodrv_ops = {
4135 	.enable		= clk_nodrv_prepare_enable,
4136 	.disable	= clk_nodrv_disable_unprepare,
4137 	.prepare	= clk_nodrv_prepare_enable,
4138 	.unprepare	= clk_nodrv_disable_unprepare,
4139 	.set_rate	= clk_nodrv_set_rate,
4140 	.set_parent	= clk_nodrv_set_parent,
4141 };
4142 
4143 static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
4144 						struct clk_core *target)
4145 {
4146 	int i;
4147 	struct clk_core *child;
4148 
4149 	for (i = 0; i < root->num_parents; i++)
4150 		if (root->parents[i].core == target)
4151 			root->parents[i].core = NULL;
4152 
4153 	hlist_for_each_entry(child, &root->children, child_node)
4154 		clk_core_evict_parent_cache_subtree(child, target);
4155 }
4156 
4157 /* Remove this clk from all parent caches */
4158 static void clk_core_evict_parent_cache(struct clk_core *core)
4159 {
4160 	struct hlist_head **lists;
4161 	struct clk_core *root;
4162 
4163 	lockdep_assert_held(&prepare_lock);
4164 
4165 	for (lists = all_lists; *lists; lists++)
4166 		hlist_for_each_entry(root, *lists, child_node)
4167 			clk_core_evict_parent_cache_subtree(root, core);
4168 
4169 }
4170 
4171 /**
4172  * clk_unregister - unregister a currently registered clock
4173  * @clk: clock to unregister
4174  */
4175 void clk_unregister(struct clk *clk)
4176 {
4177 	unsigned long flags;
4178 	const struct clk_ops *ops;
4179 
4180 	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4181 		return;
4182 
4183 	clk_debug_unregister(clk->core);
4184 
4185 	clk_prepare_lock();
4186 
4187 	ops = clk->core->ops;
4188 	if (ops == &clk_nodrv_ops) {
4189 		pr_err("%s: unregistered clock: %s\n", __func__,
4190 		       clk->core->name);
4191 		goto unlock;
4192 	}
4193 	/*
4194 	 * Assign empty clock ops for consumers that might still hold
4195 	 * a reference to this clock.
4196 	 */
4197 	flags = clk_enable_lock();
4198 	clk->core->ops = &clk_nodrv_ops;
4199 	clk_enable_unlock(flags);
4200 
4201 	if (ops->terminate)
4202 		ops->terminate(clk->core->hw);
4203 
4204 	if (!hlist_empty(&clk->core->children)) {
4205 		struct clk_core *child;
4206 		struct hlist_node *t;
4207 
4208 		/* Reparent all children to the orphan list. */
4209 		hlist_for_each_entry_safe(child, t, &clk->core->children,
4210 					  child_node)
4211 			clk_core_set_parent_nolock(child, NULL);
4212 	}
4213 
4214 	clk_core_evict_parent_cache(clk->core);
4215 
4216 	hlist_del_init(&clk->core->child_node);
4217 
4218 	if (clk->core->prepare_count)
4219 		pr_warn("%s: unregistering prepared clock: %s\n",
4220 					__func__, clk->core->name);
4221 
4222 	if (clk->core->protect_count)
4223 		pr_warn("%s: unregistering protected clock: %s\n",
4224 					__func__, clk->core->name);
4225 
4226 	kref_put(&clk->core->ref, __clk_release);
4227 	free_clk(clk);
4228 unlock:
4229 	clk_prepare_unlock();
4230 }
4231 EXPORT_SYMBOL_GPL(clk_unregister);
4232 
4233 /**
4234  * clk_hw_unregister - unregister a currently registered clk_hw
4235  * @hw: hardware-specific clock data to unregister
4236  */
4237 void clk_hw_unregister(struct clk_hw *hw)
4238 {
4239 	clk_unregister(hw->clk);
4240 }
4241 EXPORT_SYMBOL_GPL(clk_hw_unregister);
4242 
4243 static void devm_clk_unregister_cb(struct device *dev, void *res)
4244 {
4245 	clk_unregister(*(struct clk **)res);
4246 }
4247 
4248 static void devm_clk_hw_unregister_cb(struct device *dev, void *res)
4249 {
4250 	clk_hw_unregister(*(struct clk_hw **)res);
4251 }
4252 
4253 /**
4254  * devm_clk_register - resource managed clk_register()
4255  * @dev: device that is registering this clock
4256  * @hw: link to hardware-specific clock data
4257  *
4258  * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead.
4259  *
4260  * Clocks returned from this function are automatically clk_unregister()ed on
4261  * driver detach. See clk_register() for more information.
4262  */
4263 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
4264 {
4265 	struct clk *clk;
4266 	struct clk **clkp;
4267 
4268 	clkp = devres_alloc(devm_clk_unregister_cb, sizeof(*clkp), GFP_KERNEL);
4269 	if (!clkp)
4270 		return ERR_PTR(-ENOMEM);
4271 
4272 	clk = clk_register(dev, hw);
4273 	if (!IS_ERR(clk)) {
4274 		*clkp = clk;
4275 		devres_add(dev, clkp);
4276 	} else {
4277 		devres_free(clkp);
4278 	}
4279 
4280 	return clk;
4281 }
4282 EXPORT_SYMBOL_GPL(devm_clk_register);
4283 
4284 /**
4285  * devm_clk_hw_register - resource managed clk_hw_register()
4286  * @dev: device that is registering this clock
4287  * @hw: link to hardware-specific clock data
4288  *
4289  * Managed clk_hw_register(). Clocks registered by this function are
4290  * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
4291  * for more information.
4292  */
4293 int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
4294 {
4295 	struct clk_hw **hwp;
4296 	int ret;
4297 
4298 	hwp = devres_alloc(devm_clk_hw_unregister_cb, sizeof(*hwp), GFP_KERNEL);
4299 	if (!hwp)
4300 		return -ENOMEM;
4301 
4302 	ret = clk_hw_register(dev, hw);
4303 	if (!ret) {
4304 		*hwp = hw;
4305 		devres_add(dev, hwp);
4306 	} else {
4307 		devres_free(hwp);
4308 	}
4309 
4310 	return ret;
4311 }
4312 EXPORT_SYMBOL_GPL(devm_clk_hw_register);
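
/*
 * Illustrative sketch (hypothetical driver, not from this file): with the
 * devres variant no explicit unregistration is needed; the clk_hw is
 * unregistered automatically when the device is unbound. "foo" and "init"
 * are assumed locals.
 *
 *	foo->hw.init = &init;
 *	ret = devm_clk_hw_register(&pdev->dev, &foo->hw);
 *	if (ret)
 *		return ret;
 */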
4313 
4314 static int devm_clk_match(struct device *dev, void *res, void *data)
4315 {
4316 	struct clk *c = res;
4317 	if (WARN_ON(!c))
4318 		return 0;
4319 	return c == data;
4320 }
4321 
4322 static int devm_clk_hw_match(struct device *dev, void *res, void *data)
4323 {
4324 	struct clk_hw *hw = res;
4325 
4326 	if (WARN_ON(!hw))
4327 		return 0;
4328 	return hw == data;
4329 }
4330 
4331 /**
4332  * devm_clk_unregister - resource managed clk_unregister()
4333  * @dev: device that is unregistering the clock data
4334  * @clk: clock to unregister
4335  *
4336  * Deallocate a clock allocated with devm_clk_register(). Normally
4337  * this function will not need to be called and the resource management
4338  * code will ensure that the resource is freed.
4339  */
4340 void devm_clk_unregister(struct device *dev, struct clk *clk)
4341 {
4342 	WARN_ON(devres_release(dev, devm_clk_unregister_cb, devm_clk_match, clk));
4343 }
4344 EXPORT_SYMBOL_GPL(devm_clk_unregister);
4345 
4346 /**
4347  * devm_clk_hw_unregister - resource managed clk_hw_unregister()
4348  * @dev: device that is unregistering the hardware-specific clock data
4349  * @hw: link to hardware-specific clock data
4350  *
4351  * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
4352  * this function will not need to be called and the resource management
4353  * code will ensure that the resource is freed.
4354  */
4355 void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
4356 {
4357 	WARN_ON(devres_release(dev, devm_clk_hw_unregister_cb, devm_clk_hw_match,
4358 				hw));
4359 }
4360 EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
4361 
4362 static void devm_clk_release(struct device *dev, void *res)
4363 {
4364 	clk_put(*(struct clk **)res);
4365 }
4366 
4367 /**
4368  * devm_clk_hw_get_clk - resource managed clk_hw_get_clk()
4369  * @dev: device that is registering this clock
4370  * @hw: clk_hw associated with the clk being consumed
4371  * @con_id: connection ID string on device
4372  *
4373  * Managed clk_hw_get_clk(). Clocks got with this function are
4374  * automatically clk_put() on driver detach. See clk_put()
4375  * for more information.
4376  */
4377 struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw,
4378 				const char *con_id)
4379 {
4380 	struct clk *clk;
4381 	struct clk **clkp;
4382 
4383 	/* This should not happen because it would mean we have drivers
4384 	 * passing around clk_hw pointers instead of having the caller use
4385 	 * proper clk_get() style APIs
4386 	 */
4387 	WARN_ON_ONCE(dev != hw->core->dev);
4388 
4389 	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
4390 	if (!clkp)
4391 		return ERR_PTR(-ENOMEM);
4392 
4393 	clk = clk_hw_get_clk(hw, con_id);
4394 	if (!IS_ERR(clk)) {
4395 		*clkp = clk;
4396 		devres_add(dev, clkp);
4397 	} else {
4398 		devres_free(clkp);
4399 	}
4400 
4401 	return clk;
4402 }
4403 EXPORT_SYMBOL_GPL(devm_clk_hw_get_clk);
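
/*
 * Illustrative sketch (hypothetical provider, not from this file): the
 * registering driver itself may need a consumer handle on a clk_hw it owns.
 * Note @dev must be the same device that registered the clk_hw; the handle
 * is dropped automatically on driver detach.
 *
 *	clk = devm_clk_hw_get_clk(&pdev->dev, &foo->hw, NULL);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	rate = clk_get_rate(clk);
 */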
4404 
4405 /*
4406  * clkdev helpers
4407  */
4408 
4409 void __clk_put(struct clk *clk)
4410 {
4411 	struct module *owner;
4412 
4413 	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4414 		return;
4415 
4416 	clk_prepare_lock();
4417 
4418 	/*
4419 	 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
4420 	 * given user should be balanced with calls to clk_rate_exclusive_put()
4421 	 * and by that same consumer
4422 	 */
4423 	if (WARN_ON(clk->exclusive_count)) {
4424 		/* We voiced our concern, let's sanitize the situation */
4425 		clk->core->protect_count -= (clk->exclusive_count - 1);
4426 		clk_core_rate_unprotect(clk->core);
4427 		clk->exclusive_count = 0;
4428 	}
4429 
4430 	hlist_del(&clk->clks_node);
4431 	if (clk->min_rate > clk->core->req_rate ||
4432 	    clk->max_rate < clk->core->req_rate)
4433 		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
4434 
4435 	owner = clk->core->owner;
4436 	kref_put(&clk->core->ref, __clk_release);
4437 
4438 	clk_prepare_unlock();
4439 
4440 	module_put(owner);
4441 
4442 	free_clk(clk);
4443 }
4444 
4445 /***        clk rate change notifiers        ***/
4446 
4447 /**
4448  * clk_notifier_register - add a clk rate change notifier
4449  * @clk: struct clk * to watch
4450  * @nb: struct notifier_block * with callback info
4451  *
4452  * Request notification when clk's rate changes.  This uses an SRCU
4453  * notifier because we want it to block and notifier unregistrations are
4454  * uncommon.  The callbacks associated with the notifier must not
4455  * re-enter into the clk framework by calling any top-level clk APIs;
4456  * this will cause a nested acquisition of the prepare_lock mutex.
4457  *
4458  * In all notification cases (pre, post and abort rate change) the original
4459  * clock rate is passed to the callback via struct clk_notifier_data.old_rate
4460  * and the new frequency is passed via struct clk_notifier_data.new_rate.
4461  *
4462  * clk_notifier_register() must be called from non-atomic context.
4463  * Returns -EINVAL if called with null arguments, -ENOMEM upon
4464  * allocation failure; otherwise, passes along the return value of
4465  * srcu_notifier_chain_register().
4466  */
4467 int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
4468 {
4469 	struct clk_notifier *cn;
4470 	int ret = -ENOMEM;
4471 
4472 	if (!clk || !nb)
4473 		return -EINVAL;
4474 
4475 	clk_prepare_lock();
4476 
4477 	/* search the list of notifiers for this clk */
4478 	list_for_each_entry(cn, &clk_notifier_list, node)
4479 		if (cn->clk == clk)
4480 			goto found;
4481 
4482 	/* if clk wasn't in the notifier list, allocate new clk_notifier */
4483 	cn = kzalloc(sizeof(*cn), GFP_KERNEL);
4484 	if (!cn)
4485 		goto out;
4486 
4487 	cn->clk = clk;
4488 	srcu_init_notifier_head(&cn->notifier_head);
4489 
4490 	list_add(&cn->node, &clk_notifier_list);
4491 
4492 found:
4493 	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
4494 
4495 	clk->core->notifier_count++;
4496 
4497 out:
4498 	clk_prepare_unlock();
4499 
4500 	return ret;
4501 }
4502 EXPORT_SYMBOL_GPL(clk_notifier_register);
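
/*
 * Illustrative sketch (hypothetical consumer, not from this file): a rate
 * change notifier receives a struct clk_notifier_data pointer and returns a
 * NOTIFY_* code. "foo_clk_notify" and its body are assumptions.
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE)
 *			pr_debug("rate %lu -> %lu\n", ndata->old_rate,
 *				 ndata->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = { .notifier_call = foo_clk_notify };
 *	...
 *	ret = clk_notifier_register(clk, &foo_nb);
 */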
4503 
4504 /**
4505  * clk_notifier_unregister - remove a clk rate change notifier
4506  * @clk: struct clk *
4507  * @nb: struct notifier_block * with callback info
4508  *
4509  * Requests no further notification for changes to 'clk' and frees the
4510  * memory allocated in clk_notifier_register().
4511  *
4512  * Returns -EINVAL if called with null arguments; otherwise, passes
4513  * along the return value of srcu_notifier_chain_unregister().
4514  */
4515 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
4516 {
4517 	struct clk_notifier *cn;
4518 	int ret = -ENOENT;
4519 
4520 	if (!clk || !nb)
4521 		return -EINVAL;
4522 
4523 	clk_prepare_lock();
4524 
4525 	list_for_each_entry(cn, &clk_notifier_list, node) {
4526 		if (cn->clk == clk) {
4527 			ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
4528 
4529 			clk->core->notifier_count--;
4530 
4531 			/* XXX the notifier code should handle this better */
4532 			if (!cn->notifier_head.head) {
4533 				srcu_cleanup_notifier_head(&cn->notifier_head);
4534 				list_del(&cn->node);
4535 				kfree(cn);
4536 			}
4537 			break;
4538 		}
4539 	}
4540 
4541 	clk_prepare_unlock();
4542 
4543 	return ret;
4544 }
4545 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
4546 
4547 struct clk_notifier_devres {
4548 	struct clk *clk;
4549 	struct notifier_block *nb;
4550 };
4551 
4552 static void devm_clk_notifier_release(struct device *dev, void *res)
4553 {
4554 	struct clk_notifier_devres *devres = res;
4555 
4556 	clk_notifier_unregister(devres->clk, devres->nb);
4557 }
4558 
4559 int devm_clk_notifier_register(struct device *dev, struct clk *clk,
4560 			       struct notifier_block *nb)
4561 {
4562 	struct clk_notifier_devres *devres;
4563 	int ret;
4564 
4565 	devres = devres_alloc(devm_clk_notifier_release,
4566 			      sizeof(*devres), GFP_KERNEL);
4567 
4568 	if (!devres)
4569 		return -ENOMEM;
4570 
4571 	ret = clk_notifier_register(clk, nb);
4572 	if (!ret) {
4573 		devres->clk = clk;
4574 		devres->nb = nb;
4575 		devres_add(dev, devres);
4576 	} else {
4577 		devres_free(devres);
4578 	}
4579 
4580 	return ret;
4581 }
4582 EXPORT_SYMBOL_GPL(devm_clk_notifier_register);
4583 
4584 #ifdef CONFIG_OF
4585 static void clk_core_reparent_orphans(void)
4586 {
4587 	clk_prepare_lock();
4588 	clk_core_reparent_orphans_nolock();
4589 	clk_prepare_unlock();
4590 }
4591 
4592 /**
4593  * struct of_clk_provider - Clock provider registration structure
4594  * @link: Entry in global list of clock providers
4595  * @node: Pointer to device tree node of clock provider
4596  * @get: Get clock callback.  Returns NULL or a struct clk for the
4597  *       given clock specifier
4598  * @get_hw: Get clk_hw callback.  Returns NULL, ERR_PTR or a
4599  *       struct clk_hw for the given clock specifier
4600  * @data: context pointer to be passed into @get callback
4601  */
4602 struct of_clk_provider {
4603 	struct list_head link;
4604 
4605 	struct device_node *node;
4606 	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
4607 	struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
4608 	void *data;
4609 };
4610 
4611 extern struct of_device_id __clk_of_table;
4612 static const struct of_device_id __clk_of_table_sentinel
4613 	__used __section("__clk_of_table_end");
4614 
4615 static LIST_HEAD(of_clk_providers);
4616 static DEFINE_MUTEX(of_clk_mutex);
4617 
4618 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
4619 				     void *data)
4620 {
4621 	return data;
4622 }
4623 EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
4624 
4625 struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
4626 {
4627 	return data;
4628 }
4629 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
4630 
4631 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
4632 {
4633 	struct clk_onecell_data *clk_data = data;
4634 	unsigned int idx = clkspec->args[0];
4635 
4636 	if (idx >= clk_data->clk_num) {
4637 		pr_err("%s: invalid clock index %u\n", __func__, idx);
4638 		return ERR_PTR(-EINVAL);
4639 	}
4640 
4641 	return clk_data->clks[idx];
4642 }
4643 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
4644 
4645 struct clk_hw *
4646 of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
4647 {
4648 	struct clk_hw_onecell_data *hw_data = data;
4649 	unsigned int idx = clkspec->args[0];
4650 
4651 	if (idx >= hw_data->num) {
4652 		pr_err("%s: invalid index %u\n", __func__, idx);
4653 		return ERR_PTR(-EINVAL);
4654 	}
4655 
4656 	return hw_data->hws[idx];
4657 }
4658 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
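
/*
 * Illustrative sketch (hypothetical multi-clock provider, not from this
 * file): a provider exposing several clocks can pass a pre-filled
 * clk_hw_onecell_data as the context pointer for of_clk_hw_onecell_get().
 * NR_CLKS, CLK_FOO and the hws[] contents are assumptions.
 *
 *	hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, NR_CLKS),
 *			       GFP_KERNEL);
 *	hw_data->num = NR_CLKS;
 *	hw_data->hws[CLK_FOO] = &foo_hw;
 *	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, hw_data);
 */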
4659 
4660 /**
4661  * of_clk_add_provider() - Register a clock provider for a node
4662  * @np: Device node pointer associated with clock provider
4663  * @clk_src_get: callback for decoding clock
4664  * @data: context pointer for @clk_src_get callback.
4665  *
4666  * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
4667  */
4668 int of_clk_add_provider(struct device_node *np,
4669 			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
4670 						   void *data),
4671 			void *data)
4672 {
4673 	struct of_clk_provider *cp;
4674 	int ret;
4675 
4676 	if (!np)
4677 		return 0;
4678 
4679 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
4680 	if (!cp)
4681 		return -ENOMEM;
4682 
4683 	cp->node = of_node_get(np);
4684 	cp->data = data;
4685 	cp->get = clk_src_get;
4686 
4687 	mutex_lock(&of_clk_mutex);
4688 	list_add(&cp->link, &of_clk_providers);
4689 	mutex_unlock(&of_clk_mutex);
4690 	pr_debug("Added clock from %pOF\n", np);
4691 
4692 	clk_core_reparent_orphans();
4693 
4694 	ret = of_clk_set_defaults(np, true);
4695 	if (ret < 0)
4696 		of_clk_del_provider(np);
4697 
4698 	fwnode_dev_initialized(&np->fwnode, true);
4699 
4700 	return ret;
4701 }
4702 EXPORT_SYMBOL_GPL(of_clk_add_provider);
4703 
4704 /**
4705  * of_clk_add_hw_provider() - Register a clock provider for a node
4706  * @np: Device node pointer associated with clock provider
4707  * @get: callback for decoding clk_hw
4708  * @data: context pointer for @get callback.
4709  */
4710 int of_clk_add_hw_provider(struct device_node *np,
4711 			   struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4712 						 void *data),
4713 			   void *data)
4714 {
4715 	struct of_clk_provider *cp;
4716 	int ret;
4717 
4718 	if (!np)
4719 		return 0;
4720 
4721 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
4722 	if (!cp)
4723 		return -ENOMEM;
4724 
4725 	cp->node = of_node_get(np);
4726 	cp->data = data;
4727 	cp->get_hw = get;
4728 
4729 	mutex_lock(&of_clk_mutex);
4730 	list_add(&cp->link, &of_clk_providers);
4731 	mutex_unlock(&of_clk_mutex);
4732 	pr_debug("Added clk_hw provider from %pOF\n", np);
4733 
4734 	clk_core_reparent_orphans();
4735 
4736 	ret = of_clk_set_defaults(np, true);
4737 	if (ret < 0)
4738 		of_clk_del_provider(np);
4739 
4740 	fwnode_dev_initialized(&np->fwnode, true);
4741 
4742 	return ret;
4743 }
4744 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
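
/*
 * Illustrative sketch (hypothetical single-clock provider, not from this
 * file): for a node that exports exactly one clock (#clock-cells = <0>),
 * the clk_hw itself can serve as the context with of_clk_hw_simple_get().
 *
 *	ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &foo_hw);
 *	if (ret)
 *		return ret;
 *	...
 *	of_clk_del_provider(np);	// on teardown
 */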
4745 
4746 static void devm_of_clk_release_provider(struct device *dev, void *res)
4747 {
4748 	of_clk_del_provider(*(struct device_node **)res);
4749 }
4750 
4751 /*
4752  * We allow a child device to use its parent device as the clock provider node
4753  * for cases like MFD sub-devices where the child device driver wants to use
4754  * devm_*() APIs but not list the device in DT as a sub-node.
4755  */
4756 static struct device_node *get_clk_provider_node(struct device *dev)
4757 {
4758 	struct device_node *np, *parent_np;
4759 
4760 	np = dev->of_node;
4761 	parent_np = dev->parent ? dev->parent->of_node : NULL;
4762 
4763 	if (!of_find_property(np, "#clock-cells", NULL))
4764 		if (of_find_property(parent_np, "#clock-cells", NULL))
4765 			np = parent_np;
4766 
4767 	return np;
4768 }
4769 
4770 /**
4771  * devm_of_clk_add_hw_provider() - Managed clk provider node registration
4772  * @dev: Device acting as the clock provider (used for DT node and lifetime)
4773  * @get: callback for decoding clk_hw
4774  * @data: context pointer for @get callback
4775  *
4776  * Registers a clock provider for the given device's node. If the device has no
4777  * DT node, or if the device node lacks clock provider information (#clock-cells),
4778  * then the parent device's node is scanned for this information. If the parent
4779  * node has #clock-cells then it is used in the registration. The provider is
4780  * automatically released at device exit.
4781  *
4782  * Return: 0 on success or an errno on failure.
4783  */
4784 int devm_of_clk_add_hw_provider(struct device *dev,
4785 			struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4786 					      void *data),
4787 			void *data)
4788 {
4789 	struct device_node **ptr, *np;
4790 	int ret;
4791 
4792 	ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
4793 			   GFP_KERNEL);
4794 	if (!ptr)
4795 		return -ENOMEM;
4796 
4797 	np = get_clk_provider_node(dev);
4798 	ret = of_clk_add_hw_provider(np, get, data);
4799 	if (!ret) {
4800 		*ptr = np;
4801 		devres_add(dev, ptr);
4802 	} else {
4803 		devres_free(ptr);
4804 	}
4805 
4806 	return ret;
4807 }
4808 EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
4809 
4810 /**
4811  * of_clk_del_provider() - Remove a previously registered clock provider
4812  * @np: Device node pointer associated with clock provider
4813  */
4814 void of_clk_del_provider(struct device_node *np)
4815 {
4816 	struct of_clk_provider *cp;
4817 
4818 	if (!np)
4819 		return;
4820 
4821 	mutex_lock(&of_clk_mutex);
4822 	list_for_each_entry(cp, &of_clk_providers, link) {
4823 		if (cp->node == np) {
4824 			list_del(&cp->link);
4825 			fwnode_dev_initialized(&np->fwnode, false);
4826 			of_node_put(cp->node);
4827 			kfree(cp);
4828 			break;
4829 		}
4830 	}
4831 	mutex_unlock(&of_clk_mutex);
4832 }
4833 EXPORT_SYMBOL_GPL(of_clk_del_provider);
4834 
4835 static int devm_clk_provider_match(struct device *dev, void *res, void *data)
4836 {
4837 	struct device_node **np = res;
4838 
4839 	if (WARN_ON(!np || !*np))
4840 		return 0;
4841 
4842 	return *np == data;
4843 }
4844 
4845 /**
4846  * devm_of_clk_del_provider() - Remove clock provider registered using devm
4847  * @dev: Device to whose lifetime the clock provider was bound
4848  */
4849 void devm_of_clk_del_provider(struct device *dev)
4850 {
4851 	int ret;
4852 	struct device_node *np = get_clk_provider_node(dev);
4853 
4854 	ret = devres_release(dev, devm_of_clk_release_provider,
4855 			     devm_clk_provider_match, np);
4856 
4857 	WARN_ON(ret);
4858 }
4859 EXPORT_SYMBOL(devm_of_clk_del_provider);
4860 
4861 /**
4862  * of_parse_clkspec() - Parse a DT clock specifier for a given device node
4863  * @np: device node to parse clock specifier from
4864  * @index: index of phandle to parse clock out of. If index < 0, @name is used
4865  * @name: clock name to find and parse. If name is NULL, the index is used
4866  * @out_args: Result of parsing the clock specifier
4867  *
4868  * Parses a device node's "clocks" and "clock-names" properties to find the
4869  * phandle and cells for the index or name that is desired. The resulting clock
4870  * specifier is placed into @out_args, or an errno is returned when there's a
4871  * parsing error. The @index argument is ignored if @name is non-NULL.
4872  *
4873  * Example:
4874  *
4875  * phandle1: clock-controller@1 {
4876  *	#clock-cells = <2>;
4877  * }
4878  *
4879  * phandle2: clock-controller@2 {
4880  *	#clock-cells = <1>;
4881  * }
4882  *
4883  * clock-consumer@3 {
4884  *	clocks = <&phandle1 1 2 &phandle2 3>;
4885  *	clock-names = "name1", "name2";
4886  * }
4887  *
4888  * To get a device_node for `clock-controller@2' node you may call this
4889  * function a few different ways:
4890  *
4891  *   of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
4892  *   of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
4893  *   of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
4894  *
4895  * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT
4896  * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in
4897  * the "clock-names" property of @np.
4898  */
4899 static int of_parse_clkspec(const struct device_node *np, int index,
4900 			    const char *name, struct of_phandle_args *out_args)
4901 {
4902 	int ret = -ENOENT;
4903 
4904 	/* Walk up the tree of devices looking for a clock property that matches */
4905 	while (np) {
4906 		/*
4907 		 * For named clocks, first look up the name in the
4908 		 * "clock-names" property.  If it cannot be found, then index
4909 		 * will be an error code and of_parse_phandle_with_args() will
4910 		 * return -EINVAL.
4911 		 */
4912 		if (name)
4913 			index = of_property_match_string(np, "clock-names", name);
4914 		ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
4915 						 index, out_args);
4916 		if (!ret)
4917 			break;
4918 		if (name && index >= 0)
4919 			break;
4920 
4921 		/*
4922 		 * No matching clock found on this node.  If the parent node
4923 		 * has a "clock-ranges" property, then we can try one of its
4924 		 * clocks.
4925 		 */
4926 		np = np->parent;
4927 		if (np && !of_get_property(np, "clock-ranges", NULL))
4928 			break;
4929 		index = 0;
4930 	}
4931 
4932 	return ret;
4933 }
4934 
4935 static struct clk_hw *
4936 __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
4937 			      struct of_phandle_args *clkspec)
4938 {
4939 	struct clk *clk;
4940 
4941 	if (provider->get_hw)
4942 		return provider->get_hw(clkspec, provider->data);
4943 
4944 	clk = provider->get(clkspec, provider->data);
4945 	if (IS_ERR(clk))
4946 		return ERR_CAST(clk);
4947 	return __clk_get_hw(clk);
4948 }
4949 
4950 static struct clk_hw *
4951 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
4952 {
4953 	struct of_clk_provider *provider;
4954 	struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
4955 
4956 	if (!clkspec)
4957 		return ERR_PTR(-EINVAL);
4958 
4959 	mutex_lock(&of_clk_mutex);
4960 	list_for_each_entry(provider, &of_clk_providers, link) {
4961 		if (provider->node == clkspec->np) {
4962 			hw = __of_clk_get_hw_from_provider(provider, clkspec);
4963 			if (!IS_ERR(hw))
4964 				break;
4965 		}
4966 	}
4967 	mutex_unlock(&of_clk_mutex);
4968 
4969 	return hw;
4970 }
4971 
4972 /**
4973  * of_clk_get_from_provider() - Lookup a clock from a clock provider
4974  * @clkspec: pointer to a clock specifier data structure
4975  *
4976  * This function looks up a struct clk from the registered list of clock
4977  * providers; the input is a clock specifier data structure as returned
4978  * by the of_parse_phandle_with_args() function call.
4979  */
4980 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
4981 {
4982 	struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);
4983 
4984 	return clk_hw_create_clk(NULL, hw, NULL, __func__);
4985 }
4986 EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
4987 
4988 struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
4989 			     const char *con_id)
4990 {
4991 	int ret;
4992 	struct clk_hw *hw;
4993 	struct of_phandle_args clkspec;
4994 
4995 	ret = of_parse_clkspec(np, index, con_id, &clkspec);
4996 	if (ret)
4997 		return ERR_PTR(ret);
4998 
4999 	hw = of_clk_get_hw_from_clkspec(&clkspec);
5000 	of_node_put(clkspec.np);
5001 
5002 	return hw;
5003 }
5004 
5005 static struct clk *__of_clk_get(struct device_node *np,
5006 				int index, const char *dev_id,
5007 				const char *con_id)
5008 {
5009 	struct clk_hw *hw = of_clk_get_hw(np, index, con_id);
5010 
5011 	return clk_hw_create_clk(NULL, hw, dev_id, con_id);
5012 }
5013 
5014 struct clk *of_clk_get(struct device_node *np, int index)
5015 {
5016 	return __of_clk_get(np, index, np->full_name, NULL);
5017 }
5018 EXPORT_SYMBOL(of_clk_get);
5019 
5020 /**
5021  * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
5022  * @np: pointer to clock consumer node
5023  * @name: name of consumer's clock input, or NULL for the first clock reference
5024  *
5025  * This function parses the clocks and clock-names properties,
5026  * and uses them to look up the struct clk from the registered list of clock
5027  * providers.
5028  */
5029 struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
5030 {
5031 	if (!np)
5032 		return ERR_PTR(-ENOENT);
5033 
5034 	return __of_clk_get(np, 0, np->full_name, name);
5035 }
5036 EXPORT_SYMBOL(of_clk_get_by_name);
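
/*
 * Illustrative sketch (hypothetical consumer node and driver, not from this
 * file): given a consumer node with
 *
 *	clocks = <&osc>, <&pll 1>;
 *	clock-names = "ref", "bus";
 *
 * the "bus" clock can be looked up by name:
 *
 *	struct clk *bus = of_clk_get_by_name(np, "bus");
 *
 *	if (IS_ERR(bus))
 *		return PTR_ERR(bus);
 */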
5037 
5038 /**
5039  * of_clk_get_parent_count() - Count the number of clocks a device node has
5040  * @np: device node to count
5041  *
5042  * Returns: The number of clocks that are possible parents of this node
5043  */
5044 unsigned int of_clk_get_parent_count(const struct device_node *np)
5045 {
5046 	int count;
5047 
5048 	count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
5049 	if (count < 0)
5050 		return 0;
5051 
5052 	return count;
5053 }
5054 EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
5055 
5056 const char *of_clk_get_parent_name(const struct device_node *np, int index)
5057 {
5058 	struct of_phandle_args clkspec;
5059 	struct property *prop;
5060 	const char *clk_name;
5061 	const __be32 *vp;
5062 	u32 pv;
5063 	int rc;
5064 	int count;
5065 	struct clk *clk;
5066 
5067 	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
5068 					&clkspec);
5069 	if (rc)
5070 		return NULL;
5071 
5072 	index = clkspec.args_count ? clkspec.args[0] : 0;
5073 	count = 0;
5074 
5075 	/* if there is an indices property, use it to translate the index
5076 	 * specified into an array offset for the clock-output-names property.
5077 	 */
5078 	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
5079 		if (index == pv) {
5080 			index = count;
5081 			break;
5082 		}
5083 		count++;
5084 	}
5085 	/* We went off the end of 'clock-indices' without finding it */
5086 	if (prop && !vp)
5087 		return NULL;
5088 
5089 	if (of_property_read_string_index(clkspec.np, "clock-output-names",
5090 					  index,
5091 					  &clk_name) < 0) {
5092 		/*
5093 		 * Best effort to get the name if the clock has been
5094 		 * registered with the framework. If the clock isn't
5095 		 * registered, we return the node name as the name of
5096 		 * the clock as long as #clock-cells = 0.
5097 		 */
5098 		clk = of_clk_get_from_provider(&clkspec);
5099 		if (IS_ERR(clk)) {
5100 			if (clkspec.args_count == 0)
5101 				clk_name = clkspec.np->name;
5102 			else
5103 				clk_name = NULL;
5104 		} else {
5105 			clk_name = __clk_get_name(clk);
5106 			clk_put(clk);
5107 		}
5108 	}
5109 
5110 
5111 	of_node_put(clkspec.np);
5112 	return clk_name;
5113 }
5114 EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
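
/*
 * Illustrative sketch (hypothetical provider node, not from this file): with
 * a parent provider such as
 *
 *	oscsel: clock-controller@0 {
 *		#clock-cells = <1>;
 *		clock-output-names = "xtal", "rc";
 *		clock-indices = <0>, <3>;
 *	};
 *
 * of_clk_get_parent_name() on a consumer whose clocks property references
 * <&oscsel 3> translates index 3 via clock-indices to array offset 1 and
 * returns "rc".
 */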
5115 
5116 /**
5117  * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
5118  * number of parents
5119  * @np: Device node pointer associated with clock provider
5120  * @parents: pointer to char array that hold the parents' names
5121  * @size: size of the @parents array
5122  *
5123  * Return: number of parents for the clock node.
5124  */
5125 int of_clk_parent_fill(struct device_node *np, const char **parents,
5126 		       unsigned int size)
5127 {
5128 	unsigned int i = 0;
5129 
5130 	while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
5131 		i++;
5132 
5133 	return i;
5134 }
5135 EXPORT_SYMBOL_GPL(of_clk_parent_fill);
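
/*
 * Illustrative sketch (hypothetical provider init, not from this file): a
 * provider with a known maximum number of parents can fill a local array
 * before registering a mux; "parents" and its size are assumptions.
 *
 *	const char *parents[2];
 *	unsigned int num_parents = of_clk_parent_fill(np, parents,
 *						      ARRAY_SIZE(parents));
 */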
5136 
5137 struct clock_provider {
5138 	void (*clk_init_cb)(struct device_node *);
5139 	struct device_node *np;
5140 	struct list_head node;
5141 };
5142 
5143 /*
5144  * This function looks for a parent clock. If there is one, then it
5145  * checks that the provider for this parent clock was initialized, in
5146  * which case the parent clock will be ready.
5147  */
5148 static int parent_ready(struct device_node *np)
5149 {
5150 	int i = 0;
5151 
5152 	while (true) {
5153 		struct clk *clk = of_clk_get(np, i);
5154 
5155 		/* this parent is ready, we can check the next one */
5156 		if (!IS_ERR(clk)) {
5157 			clk_put(clk);
5158 			i++;
5159 			continue;
5160 		}
5161 
5162 		/* at least one parent is not ready, we exit now */
5163 		if (PTR_ERR(clk) == -EPROBE_DEFER)
5164 			return 0;
5165 
5166 		/*
5167 		 * Here we assume that the device tree is written
5168 		 * correctly. So an error means that there are no more
5169 		 * parents. As we didn't exit yet, the previous parents
5170 		 * are ready. If there is no clock parent, there is no
5171 		 * need to wait for one, so we can consider its absence
5172 		 * as meaning it is ready.
5173 		 */
5174 		return 1;
5175 	}
5176 }
5177 
5178 /**
5179  * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
5180  * @np: Device node pointer associated with clock provider
5181  * @index: clock index
5182  * @flags: pointer to top-level framework flags
5183  *
5184  * Detects if the clock-critical property exists and, if so, sets the
5185  * corresponding CLK_IS_CRITICAL flag.
5186  *
5187  * Do not use this function. It exists only for legacy Device Tree
5188  * bindings, such as the outdated one-clock-per-node style.
5189  * Those bindings typically put all clock data into .dts and the Linux
5190  * driver has no clock data, thus making it impossible to set this flag
5191  * correctly from the driver. Only those drivers may call
5192  * of_clk_detect_critical from their setup functions.
5193  *
5194  * Return: error code or zero on success
5195  */
5196 int of_clk_detect_critical(struct device_node *np, int index,
5197 			   unsigned long *flags)
5198 {
5199 	struct property *prop;
5200 	const __be32 *cur;
5201 	uint32_t idx;
5202 
5203 	if (!np || !flags)
5204 		return -EINVAL;
5205 
5206 	of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
5207 		if (index == idx)
5208 			*flags |= CLK_IS_CRITICAL;
5209 
5210 	return 0;
5211 }
5212 
5213 /**
5214  * of_clk_init() - Scan and init clock providers from the DT
5215  * @matches: array of compatible values and init functions for providers.
5216  *
5217  * This function scans the device tree for matching clock providers
5218  * and calls their initialization functions, trying to follow the
5219  * dependency order between them.
5220  */
5221 void __init of_clk_init(const struct of_device_id *matches)
5222 {
5223 	const struct of_device_id *match;
5224 	struct device_node *np;
5225 	struct clock_provider *clk_provider, *next;
5226 	bool is_init_done;
5227 	bool force = false;
5228 	LIST_HEAD(clk_provider_list);
5229 
5230 	if (!matches)
5231 		matches = &__clk_of_table;
5232 
5233 	/* First prepare the list of the clocks providers */
5234 	for_each_matching_node_and_match(np, matches, &match) {
5235 		struct clock_provider *parent;
5236 
5237 		if (!of_device_is_available(np))
5238 			continue;
5239 
5240 		parent = kzalloc(sizeof(*parent), GFP_KERNEL);
5241 		if (!parent) {
5242 			list_for_each_entry_safe(clk_provider, next,
5243 						 &clk_provider_list, node) {
5244 				list_del(&clk_provider->node);
5245 				of_node_put(clk_provider->np);
5246 				kfree(clk_provider);
5247 			}
5248 			of_node_put(np);
5249 			return;
5250 		}
5251 
5252 		parent->clk_init_cb = match->data;
5253 		parent->np = of_node_get(np);
5254 		list_add_tail(&parent->node, &clk_provider_list);
5255 	}
5256 
5257 	while (!list_empty(&clk_provider_list)) {
5258 		is_init_done = false;
5259 		list_for_each_entry_safe(clk_provider, next,
5260 					&clk_provider_list, node) {
5261 			if (force || parent_ready(clk_provider->np)) {
5262 
5263 				/* Don't populate platform devices */
5264 				of_node_set_flag(clk_provider->np,
5265 						 OF_POPULATED);
5266 
5267 				clk_provider->clk_init_cb(clk_provider->np);
5268 				of_clk_set_defaults(clk_provider->np, true);
5269 
5270 				list_del(&clk_provider->node);
5271 				of_node_put(clk_provider->np);
5272 				kfree(clk_provider);
5273 				is_init_done = true;
5274 			}
5275 		}
5276 
5277 		/*
5278 		 * We didn't manage to initialize any of the
5279 		 * remaining providers during the last loop, so now we
5280 		 * initialize all the remaining ones unconditionally
5281 		 * in case the clock parent was not mandatory
5282 		 */
5283 		if (!is_init_done)
5284 			force = true;
5285 	}
5286 }
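
/*
 * Illustrative sketch (hypothetical early provider, not from this file):
 * providers needed before the device model is up register an init callback
 * with CLK_OF_DECLARE(); of_clk_init() then invokes it for every matching,
 * available node. "foo" and the compatible string are assumptions.
 *
 *	static void __init foo_clk_init(struct device_node *np)
 *	{
 *		// ...register clk_hws, then of_clk_add_hw_provider(np, ...)
 *	}
 *	CLK_OF_DECLARE(foo_clk, "vendor,foo-clock", foo_clk_init);
 */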
5287 #endif
5288