1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
4 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
5 *
6 * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
7 */
8
9 #include <linux/clk.h>
10 #include <linux/clk-provider.h>
11 #include <linux/clk/clk-conf.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/spinlock.h>
15 #include <linux/err.h>
16 #include <linux/list.h>
17 #include <linux/slab.h>
18 #include <linux/of.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/sched.h>
23 #include <linux/clkdev.h>
24
25 #include "clk.h"
26
27 static DEFINE_SPINLOCK(enable_lock);
28 static DEFINE_MUTEX(prepare_lock);
29
30 static struct task_struct *prepare_owner;
31 static struct task_struct *enable_owner;
32
33 static int prepare_refcnt;
34 static int enable_refcnt;
35
36 static HLIST_HEAD(clk_root_list);
37 static HLIST_HEAD(clk_orphan_list);
38 static LIST_HEAD(clk_notifier_list);
39
40 static struct hlist_head *all_lists[] = {
41 &clk_root_list,
42 &clk_orphan_list,
43 NULL,
44 };
45
46 /*** private data structures ***/
47
48 struct clk_parent_map {
49 const struct clk_hw *hw;
50 struct clk_core *core;
51 const char *fw_name;
52 const char *name;
53 int index;
54 };
55
56 struct clk_core {
57 const char *name;
58 const struct clk_ops *ops;
59 struct clk_hw *hw;
60 struct module *owner;
61 struct device *dev;
62 struct device_node *of_node;
63 struct clk_core *parent;
64 struct clk_parent_map *parents;
65 u8 num_parents;
66 u8 new_parent_index;
67 unsigned long rate;
68 unsigned long req_rate;
69 unsigned long new_rate;
70 struct clk_core *new_parent;
71 struct clk_core *new_child;
72 unsigned long flags;
73 bool orphan;
74 bool rpm_enabled;
75 bool need_sync;
76 bool boot_enabled;
77 unsigned int enable_count;
78 unsigned int prepare_count;
79 unsigned int protect_count;
80 unsigned long min_rate;
81 unsigned long max_rate;
82 unsigned long accuracy;
83 int phase;
84 struct clk_duty duty;
85 struct hlist_head children;
86 struct hlist_node child_node;
87 struct hlist_head clks;
88 unsigned int notifier_count;
89 #ifdef CONFIG_DEBUG_FS
90 struct dentry *dentry;
91 struct hlist_node debug_node;
92 #endif
93 struct kref ref;
94 };
95
96 #define CREATE_TRACE_POINTS
97 #include <trace/events/clk.h>
98
99 struct clk {
100 struct clk_core *core;
101 struct device *dev;
102 const char *dev_id;
103 const char *con_id;
104 unsigned long min_rate;
105 unsigned long max_rate;
106 unsigned int exclusive_count;
107 struct hlist_node clks_node;
108 };
109
110 /*** runtime pm ***/
111 static int clk_pm_runtime_get(struct clk_core *core)
112 {
113 int ret;
114
115 if (!core->rpm_enabled)
116 return 0;
117
118 ret = pm_runtime_get_sync(core->dev);
119 return ret < 0 ? ret : 0;
120 }
121
122 static void clk_pm_runtime_put(struct clk_core *core)
123 {
124 if (!core->rpm_enabled)
125 return;
126
127 pm_runtime_put_sync(core->dev);
128 }
129
130 /*** locking ***/
131 static void clk_prepare_lock(void)
132 {
133 if (!mutex_trylock(&prepare_lock)) {
134 if (prepare_owner == current) {
135 prepare_refcnt++;
136 return;
137 }
138 mutex_lock(&prepare_lock);
139 }
140 WARN_ON_ONCE(prepare_owner != NULL);
141 WARN_ON_ONCE(prepare_refcnt != 0);
142 prepare_owner = current;
143 prepare_refcnt = 1;
144 }
145
146 static void clk_prepare_unlock(void)
147 {
148 WARN_ON_ONCE(prepare_owner != current);
149 WARN_ON_ONCE(prepare_refcnt == 0);
150
151 if (--prepare_refcnt)
152 return;
153 prepare_owner = NULL;
154 mutex_unlock(&prepare_lock);
155 }
156
157 static unsigned long clk_enable_lock(void)
158 __acquires(enable_lock)
159 {
160 unsigned long flags;
161
162 /*
163 * On UP systems, spin_trylock_irqsave() always returns true, even if
164 * we already hold the lock. So, in that case, we rely only on
165 * reference counting.
166 */
167 if (!IS_ENABLED(CONFIG_SMP) ||
168 !spin_trylock_irqsave(&enable_lock, flags)) {
169 if (enable_owner == current) {
170 enable_refcnt++;
171 __acquire(enable_lock);
172 if (!IS_ENABLED(CONFIG_SMP))
173 local_save_flags(flags);
174 return flags;
175 }
176 spin_lock_irqsave(&enable_lock, flags);
177 }
178 WARN_ON_ONCE(enable_owner != NULL);
179 WARN_ON_ONCE(enable_refcnt != 0);
180 enable_owner = current;
181 enable_refcnt = 1;
182 return flags;
183 }
184
185 static void clk_enable_unlock(unsigned long flags)
186 __releases(enable_lock)
187 {
188 WARN_ON_ONCE(enable_owner != current);
189 WARN_ON_ONCE(enable_refcnt == 0);
190
191 if (--enable_refcnt) {
192 __release(enable_lock);
193 return;
194 }
195 enable_owner = NULL;
196 spin_unlock_irqrestore(&enable_lock, flags);
197 }
198
199 static bool clk_core_rate_is_protected(struct clk_core *core)
200 {
201 return core->protect_count;
202 }
203
204 static bool clk_core_is_prepared(struct clk_core *core)
205 {
206 bool ret = false;
207
208 /*
209 * .is_prepared is optional for clocks that can prepare
210 * fall back to software usage counter if it is missing
211 */
212 if (!core->ops->is_prepared)
213 return core->prepare_count;
214
215 if (!clk_pm_runtime_get(core)) {
216 ret = core->ops->is_prepared(core->hw);
217 clk_pm_runtime_put(core);
218 }
219
220 return ret;
221 }
222
223 static bool clk_core_is_enabled(struct clk_core *core)
224 {
225 bool ret = false;
226
227 /*
228 * .is_enabled is only mandatory for clocks that gate
229 * fall back to software usage counter if .is_enabled is missing
230 */
231 if (!core->ops->is_enabled)
232 return core->enable_count;
233
234 /*
235 * Check if clock controller's device is runtime active before
236 * calling .is_enabled callback. If not, assume that clock is
237 * disabled, because we might be called from atomic context, from
238 * which pm_runtime_get() is not allowed.
239 * This function is called mainly from clk_disable_unused_subtree,
240 * which ensures proper runtime pm activation of controller before
241 * taking enable spinlock, but the below check is needed if one tries
242 * to call it from other places.
243 */
244 if (core->rpm_enabled) {
245 pm_runtime_get_noresume(core->dev);
246 if (!pm_runtime_active(core->dev)) {
247 ret = false;
248 goto done;
249 }
250 }
251
252 ret = core->ops->is_enabled(core->hw);
253 done:
254 if (core->rpm_enabled)
255 pm_runtime_put(core->dev);
256
257 return ret;
258 }
259
260 /*** helper functions ***/
261
262 const char *__clk_get_name(const struct clk *clk)
263 {
264 return !clk ? NULL : clk->core->name;
265 }
266 EXPORT_SYMBOL_GPL(__clk_get_name);
267
268 const char *clk_hw_get_name(const struct clk_hw *hw)
269 {
270 return hw->core->name;
271 }
272 EXPORT_SYMBOL_GPL(clk_hw_get_name);
273
274 struct clk_hw *__clk_get_hw(struct clk *clk)
275 {
276 return !clk ? NULL : clk->core->hw;
277 }
278 EXPORT_SYMBOL_GPL(__clk_get_hw);
279
280 unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
281 {
282 return hw->core->num_parents;
283 }
284 EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);
285
286 struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
287 {
288 return hw->core->parent ? hw->core->parent->hw : NULL;
289 }
290 EXPORT_SYMBOL_GPL(clk_hw_get_parent);
291
292 static struct clk_core *__clk_lookup_subtree(const char *name,
293 struct clk_core *core)
294 {
295 struct clk_core *child;
296 struct clk_core *ret;
297
298 if (!strcmp(core->name, name))
299 return core;
300
301 hlist_for_each_entry(child, &core->children, child_node) {
302 ret = __clk_lookup_subtree(name, child);
303 if (ret)
304 return ret;
305 }
306
307 return NULL;
308 }
309
310 static struct clk_core *clk_core_lookup(const char *name)
311 {
312 struct clk_core *root_clk;
313 struct clk_core *ret;
314
315 if (!name)
316 return NULL;
317
318 /* search the 'proper' clk tree first */
319 hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
320 ret = __clk_lookup_subtree(name, root_clk);
321 if (ret)
322 return ret;
323 }
324
325 /* if not found, then search the orphan tree */
326 hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
327 ret = __clk_lookup_subtree(name, root_clk);
328 if (ret)
329 return ret;
330 }
331
332 return NULL;
333 }
334
335 #ifdef CONFIG_OF
336 static int of_parse_clkspec(const struct device_node *np, int index,
337 const char *name, struct of_phandle_args *out_args);
338 static struct clk_hw *
339 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
340 #else
341 static inline int of_parse_clkspec(const struct device_node *np, int index,
342 const char *name,
343 struct of_phandle_args *out_args)
344 {
345 return -ENOENT;
346 }
347 static inline struct clk_hw *
348 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
349 {
350 return ERR_PTR(-ENOENT);
351 }
352 #endif
353
354 /**
355 * clk_core_get - Find the clk_core parent of a clk
356 * @core: clk to find parent of
357 * @p_index: parent index to search for
358 *
359 * This is the preferred method for clk providers to find the parent of a
360 * clk when that parent is external to the clk controller. The parent_names
361 * array is indexed and treated as a local name matching a string in the device
362 * node's 'clock-names' property or as the 'con_id' matching the device's
363 * dev_name() in a clk_lookup. This allows clk providers to use their own
364 * namespace instead of looking for a globally unique parent string.
365 *
366 * For example the following DT snippet would allow a clock registered by the
367 * clock-controller@c001 that has a clk_init_data::parent_data array
368 * with 'xtal' in the 'name' member to find the clock provided by the
369 * clock-controller@f00abcd without needing to get the globally unique name of
370 * the xtal clk.
371 *
372 * parent: clock-controller@f00abcd {
373 * reg = <0xf00abcd 0xabcd>;
374 * #clock-cells = <0>;
375 * };
376 *
377 * clock-controller@c001 {
378 * reg = <0xc001 0xf00d>;
379 * clocks = <&parent>;
380 * clock-names = "xtal";
381 * #clock-cells = <1>;
382 * };
383 *
384 * Returns: -ENOENT when the provider can't be found or the clk doesn't
385 * exist in the provider or the name can't be found in the DT node or
386 * in a clkdev lookup. NULL when the provider knows about the clk but it
387 * isn't provided on this system.
388 * A valid clk_core pointer when the clk can be found in the provider.
389 */
390 static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
391 {
392 const char *name = core->parents[p_index].fw_name;
393 int index = core->parents[p_index].index;
394 struct clk_hw *hw = ERR_PTR(-ENOENT);
395 struct device *dev = core->dev;
396 const char *dev_id = dev ? dev_name(dev) : NULL;
397 struct device_node *np = core->of_node;
398 struct of_phandle_args clkspec;
399
400 if (np && (name || index >= 0) &&
401 !of_parse_clkspec(np, index, name, &clkspec)) {
402 hw = of_clk_get_hw_from_clkspec(&clkspec);
403 of_node_put(clkspec.np);
404 } else if (name) {
405 /*
406 * If the DT search above couldn't find the provider, fall back to
407 * looking it up via clkdev-based clk_lookups.
408 */
409 hw = clk_find_hw(dev_id, name);
410 }
411
412 if (IS_ERR(hw))
413 return ERR_CAST(hw);
414
415 return hw->core;
416 }
417
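/*
 * Illustrative sketch (not part of this file): a provider matching the DT
 * snippet above could describe its "xtal" parent with clk_parent_data so
 * that clk_core_get() can resolve it through the node's clock-names. The
 * clock name and ops below are hypothetical.
 */
#if 0
static const struct clk_parent_data example_parent_data[] = {
	/* .fw_name matches a "clock-names" entry; .name is a legacy fallback */
	{ .fw_name = "xtal", .name = "xtal" },
};

static const struct clk_init_data example_init = {
	.name		= "example_gate",
	.ops		= &example_gate_ops,	/* hypothetical clk_ops */
	.parent_data	= example_parent_data,
	.num_parents	= ARRAY_SIZE(example_parent_data),
};
#endif
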
418 static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
419 {
420 struct clk_parent_map *entry = &core->parents[index];
421 struct clk_core *parent = ERR_PTR(-ENOENT);
422
423 if (entry->hw) {
424 parent = entry->hw->core;
425 /*
426 * We have a direct reference but it isn't registered yet?
427 * Orphan it and let clk_reparent() update the orphan status
428 * when the parent is registered.
429 */
430 if (!parent)
431 parent = ERR_PTR(-EPROBE_DEFER);
432 } else {
433 parent = clk_core_get(core, index);
434 if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
435 parent = clk_core_lookup(entry->name);
436 }
437
438 /* Only cache it if it's not an error */
439 if (!IS_ERR(parent))
440 entry->core = parent;
441 }
442
443 static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
444 u8 index)
445 {
446 if (!core || index >= core->num_parents || !core->parents)
447 return NULL;
448
449 if (!core->parents[index].core)
450 clk_core_fill_parent_index(core, index);
451
452 return core->parents[index].core;
453 }
454
455 struct clk_hw *
456 clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
457 {
458 struct clk_core *parent;
459
460 parent = clk_core_get_parent_by_index(hw->core, index);
461
462 return !parent ? NULL : parent->hw;
463 }
464 EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);
465
466 unsigned int __clk_get_enable_count(struct clk *clk)
467 {
468 return !clk ? 0 : clk->core->enable_count;
469 }
470
471 static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
472 {
473 if (!core)
474 return 0;
475
476 if (!core->num_parents || core->parent)
477 return core->rate;
478
479 /*
480 * Clk must have a parent because num_parents > 0 but the parent isn't
481 * known yet. Best to return 0 as the rate of this clk until we can
482 * properly recalc the rate based on the parent's rate.
483 */
484 return 0;
485 }
486
487 unsigned long clk_hw_get_rate(const struct clk_hw *hw)
488 {
489 return clk_core_get_rate_nolock(hw->core);
490 }
491 EXPORT_SYMBOL_GPL(clk_hw_get_rate);
492
493 static unsigned long __clk_get_accuracy(struct clk_core *core)
494 {
495 if (!core)
496 return 0;
497
498 return core->accuracy;
499 }
500
501 unsigned long __clk_get_flags(struct clk *clk)
502 {
503 return !clk ? 0 : clk->core->flags;
504 }
505 EXPORT_SYMBOL_GPL(__clk_get_flags);
506
507 unsigned long clk_hw_get_flags(const struct clk_hw *hw)
508 {
509 return hw->core->flags;
510 }
511 EXPORT_SYMBOL_GPL(clk_hw_get_flags);
512
513 bool clk_hw_is_prepared(const struct clk_hw *hw)
514 {
515 return clk_core_is_prepared(hw->core);
516 }
517 EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
518
519 bool clk_hw_rate_is_protected(const struct clk_hw *hw)
520 {
521 return clk_core_rate_is_protected(hw->core);
522 }
523 EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
524
525 bool clk_hw_is_enabled(const struct clk_hw *hw)
526 {
527 return clk_core_is_enabled(hw->core);
528 }
529 EXPORT_SYMBOL_GPL(clk_hw_is_enabled);
530
531 bool __clk_is_enabled(struct clk *clk)
532 {
533 if (!clk)
534 return false;
535
536 return clk_core_is_enabled(clk->core);
537 }
538 EXPORT_SYMBOL_GPL(__clk_is_enabled);
539
540 static bool mux_is_better_rate(unsigned long rate, unsigned long now,
541 unsigned long best, unsigned long flags)
542 {
543 if (flags & CLK_MUX_ROUND_CLOSEST)
544 return abs(now - rate) < abs(best - rate);
545
546 return now <= rate && now > best;
547 }
548
549 int clk_mux_determine_rate_flags(struct clk_hw *hw,
550 struct clk_rate_request *req,
551 unsigned long flags)
552 {
553 struct clk_core *core = hw->core, *parent, *best_parent = NULL;
554 int i, num_parents, ret;
555 unsigned long best = 0;
556 struct clk_rate_request parent_req = *req;
557
558 /* if NO_REPARENT flag set, pass through to current parent */
559 if (core->flags & CLK_SET_RATE_NO_REPARENT) {
560 parent = core->parent;
561 if (core->flags & CLK_SET_RATE_PARENT) {
562 ret = __clk_determine_rate(parent ? parent->hw : NULL,
563 &parent_req);
564 if (ret)
565 return ret;
566
567 best = parent_req.rate;
568 } else if (parent) {
569 best = clk_core_get_rate_nolock(parent);
570 } else {
571 best = clk_core_get_rate_nolock(core);
572 }
573
574 goto out;
575 }
576
577 /* find the parent that can provide the fastest rate <= rate */
578 num_parents = core->num_parents;
579 for (i = 0; i < num_parents; i++) {
580 parent = clk_core_get_parent_by_index(core, i);
581 if (!parent)
582 continue;
583
584 if (core->flags & CLK_SET_RATE_PARENT) {
585 parent_req = *req;
586 ret = __clk_determine_rate(parent->hw, &parent_req);
587 if (ret)
588 continue;
589 } else {
590 parent_req.rate = clk_core_get_rate_nolock(parent);
591 }
592
593 if (mux_is_better_rate(req->rate, parent_req.rate,
594 best, flags)) {
595 best_parent = parent;
596 best = parent_req.rate;
597 }
598 }
599
600 if (!best_parent)
601 return -EINVAL;
602
603 out:
604 if (best_parent)
605 req->best_parent_hw = best_parent->hw;
606 req->best_parent_rate = best;
607 req->rate = best;
608
609 return 0;
610 }
611 EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
612
613 struct clk *__clk_lookup(const char *name)
614 {
615 struct clk_core *core = clk_core_lookup(name);
616
617 return !core ? NULL : core->hw->clk;
618 }
619
620 static void clk_core_get_boundaries(struct clk_core *core,
621 unsigned long *min_rate,
622 unsigned long *max_rate)
623 {
624 struct clk *clk_user;
625
626 lockdep_assert_held(&prepare_lock);
627
628 *min_rate = core->min_rate;
629 *max_rate = core->max_rate;
630
631 hlist_for_each_entry(clk_user, &core->clks, clks_node)
632 *min_rate = max(*min_rate, clk_user->min_rate);
633
634 hlist_for_each_entry(clk_user, &core->clks, clks_node)
635 *max_rate = min(*max_rate, clk_user->max_rate);
636 }
637
638 void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
639 unsigned long max_rate)
640 {
641 hw->core->min_rate = min_rate;
642 hw->core->max_rate = max_rate;
643 }
644 EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
645
646 /*
647 * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
648 * @hw: mux type clk to determine rate on
649 * @req: rate request, also used to return preferred parent and frequencies
650 *
651 * Helper for finding best parent to provide a given frequency. This can be used
652 * directly as a determine_rate callback (e.g. for a mux), or from a more
653 * complex clock that may combine a mux with other operations.
654 *
655 * Returns: 0 on success, a negative error code otherwise
656 */
657 int __clk_mux_determine_rate(struct clk_hw *hw,
658 struct clk_rate_request *req)
659 {
660 return clk_mux_determine_rate_flags(hw, req, 0);
661 }
662 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
663
664 int __clk_mux_determine_rate_closest(struct clk_hw *hw,
665 struct clk_rate_request *req)
666 {
667 return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
668 }
669 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
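/*
 * Illustrative sketch (not part of this file): a basic mux provider can plug
 * __clk_mux_determine_rate() straight into its clk_ops as the determine_rate
 * callback. The get_parent/set_parent callbacks named here are hypothetical.
 */
#if 0
static const struct clk_ops example_mux_ops = {
	.determine_rate	= __clk_mux_determine_rate,
	.get_parent	= example_mux_get_parent,	/* hypothetical */
	.set_parent	= example_mux_set_parent,	/* hypothetical */
};
#endif
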
670
671 /*** clk api ***/
672
673 static void clk_core_rate_unprotect(struct clk_core *core)
674 {
675 lockdep_assert_held(&prepare_lock);
676
677 if (!core)
678 return;
679
680 if (WARN(core->protect_count == 0,
681 "%s already unprotected\n", core->name))
682 return;
683
684 if (--core->protect_count > 0)
685 return;
686
687 clk_core_rate_unprotect(core->parent);
688 }
689
690 static int clk_core_rate_nuke_protect(struct clk_core *core)
691 {
692 int ret;
693
694 lockdep_assert_held(&prepare_lock);
695
696 if (!core)
697 return -EINVAL;
698
699 if (core->protect_count == 0)
700 return 0;
701
702 ret = core->protect_count;
703 core->protect_count = 1;
704 clk_core_rate_unprotect(core);
705
706 return ret;
707 }
708
709 /**
710 * clk_rate_exclusive_put - release exclusivity over clock rate control
711 * @clk: the clk over which the exclusivity is released
712 *
713 * clk_rate_exclusive_put() completes a critical section during which a clock
714 * consumer cannot tolerate any other consumer making any operation on the
715 * clock which could result in a rate change or rate glitch. Exclusive clocks
716 * cannot have their rate changed, either directly or indirectly due to changes
717 * further up the parent chain of clocks. As a result, clocks up the parent chain
718 * are also placed under exclusive control of the calling consumer.
719 *
720 * If exclusivity is claimed more than once on a clock, even by the same consumer,
721 * the rate effectively gets locked as exclusivity can't be preempted.
722 *
723 * Calls to clk_rate_exclusive_put() must be balanced with calls to
724 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
725 * error status.
726 */
727 void clk_rate_exclusive_put(struct clk *clk)
728 {
729 if (!clk)
730 return;
731
732 clk_prepare_lock();
733
734 /*
735 * if there is something wrong with this consumer protect count, stop
736 * here before messing with the provider
737 */
738 if (WARN_ON(clk->exclusive_count <= 0))
739 goto out;
740
741 clk_core_rate_unprotect(clk->core);
742 clk->exclusive_count--;
743 out:
744 clk_prepare_unlock();
745 }
746 EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);
747
748 static void clk_core_rate_protect(struct clk_core *core)
749 {
750 lockdep_assert_held(&prepare_lock);
751
752 if (!core)
753 return;
754
755 if (core->protect_count == 0)
756 clk_core_rate_protect(core->parent);
757
758 core->protect_count++;
759 }
760
761 static void clk_core_rate_restore_protect(struct clk_core *core, int count)
762 {
763 lockdep_assert_held(&prepare_lock);
764
765 if (!core)
766 return;
767
768 if (count == 0)
769 return;
770
771 clk_core_rate_protect(core);
772 core->protect_count = count;
773 }
774
775 /**
776 * clk_rate_exclusive_get - get exclusivity over the clk rate control
777 * @clk: the clk over which the exclusivity of rate control is requested
778 *
779 * clk_rate_exclusive_get() begins a critical section during which a clock
780 * consumer cannot tolerate any other consumer making any operation on the
781 * clock which could result in a rate change or rate glitch. Exclusive clocks
782 * cannot have their rate changed, either directly or indirectly due to changes
783 * further up the parent chain of clocks. As a result, clocks up the parent chain
784 * are also placed under exclusive control of the calling consumer.
785 *
786 * If exclusivity is claimed more than once on a clock, even by the same consumer,
787 * the rate effectively gets locked as exclusivity can't be preempted.
788 *
789 * Calls to clk_rate_exclusive_get() should be balanced with calls to
790 * clk_rate_exclusive_put(). Calls to this function may sleep.
791 * Returns 0 on success, a negative error code otherwise
792 */
793 int clk_rate_exclusive_get(struct clk *clk)
794 {
795 if (!clk)
796 return 0;
797
798 clk_prepare_lock();
799 clk_core_rate_protect(clk->core);
800 clk->exclusive_count++;
801 clk_prepare_unlock();
802
803 return 0;
804 }
805 EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
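/*
 * Illustrative sketch (not part of this file): a consumer that cannot
 * tolerate rate glitches brackets its critical section with balanced
 * exclusive get/put calls. The clk pointer and rate are made up.
 */
#if 0
static int example_glitch_free_section(struct clk *clk)
{
	int ret;

	ret = clk_rate_exclusive_get(clk);	/* may sleep */
	if (ret)
		return ret;

	ret = clk_set_rate(clk, 100000000);	/* nobody else may change it now */

	/* ... work that cannot tolerate a rate glitch ... */

	clk_rate_exclusive_put(clk);		/* balance the get */

	return ret;
}
#endif
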
806
807 static void clk_core_unprepare(struct clk_core *core)
808 {
809 lockdep_assert_held(&prepare_lock);
810
811 if (!core)
812 return;
813
814 if (WARN(core->prepare_count == 0,
815 "%s already unprepared\n", core->name))
816 return;
817
818 if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
819 "Unpreparing critical %s\n", core->name))
820 return;
821
822 if (core->flags & CLK_SET_RATE_GATE)
823 clk_core_rate_unprotect(core);
824
825 if (--core->prepare_count > 0)
826 return;
827
828 WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);
829
830 trace_clk_unprepare(core);
831
832 if (core->ops->unprepare)
833 core->ops->unprepare(core->hw);
834
835 clk_pm_runtime_put(core);
836
837 trace_clk_unprepare_complete(core);
838 clk_core_unprepare(core->parent);
839 }
840
841 static void clk_core_unprepare_lock(struct clk_core *core)
842 {
843 clk_prepare_lock();
844 clk_core_unprepare(core);
845 clk_prepare_unlock();
846 }
847
848 /**
849 * clk_unprepare - undo preparation of a clock source
850 * @clk: the clk being unprepared
851 *
852 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
853 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
854 * if the operation may sleep. One example is a clk which is accessed over
855 * I2C. In the complex case a clk gate operation may require a fast and a slow
856 * part. For this reason clk_unprepare and clk_disable are not mutually
857 * exclusive. In fact clk_disable must be called before clk_unprepare.
858 */
859 void clk_unprepare(struct clk *clk)
860 {
861 if (IS_ERR_OR_NULL(clk))
862 return;
863
864 clk_core_unprepare_lock(clk->core);
865 }
866 EXPORT_SYMBOL_GPL(clk_unprepare);
867
868 static int clk_core_prepare(struct clk_core *core)
869 {
870 int ret = 0;
871
872 lockdep_assert_held(&prepare_lock);
873
874 if (!core)
875 return 0;
876
877 if (core->prepare_count == 0) {
878 ret = clk_pm_runtime_get(core);
879 if (ret)
880 return ret;
881
882 ret = clk_core_prepare(core->parent);
883 if (ret)
884 goto runtime_put;
885
886 trace_clk_prepare(core);
887
888 if (core->ops->prepare)
889 ret = core->ops->prepare(core->hw);
890
891 trace_clk_prepare_complete(core);
892
893 if (ret)
894 goto unprepare;
895 }
896
897 core->prepare_count++;
898
899 /*
900 * CLK_SET_RATE_GATE is a special case of clock protection
901 * Instead of a consumer claiming exclusive rate control, it is
902 * actually the provider which prevents any consumer from making any
903 * operation which could result in a rate change or rate glitch while
904 * the clock is prepared.
905 */
906 if (core->flags & CLK_SET_RATE_GATE)
907 clk_core_rate_protect(core);
908
909 return 0;
910 unprepare:
911 clk_core_unprepare(core->parent);
912 runtime_put:
913 clk_pm_runtime_put(core);
914 return ret;
915 }
916
917 static int clk_core_prepare_lock(struct clk_core *core)
918 {
919 int ret;
920
921 clk_prepare_lock();
922 ret = clk_core_prepare(core);
923 clk_prepare_unlock();
924
925 return ret;
926 }
927
928 /**
929 * clk_prepare - prepare a clock source
930 * @clk: the clk being prepared
931 *
932 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
933 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
934 * operation may sleep. One example is a clk which is accessed over I2C. In
935 * the complex case a clk ungate operation may require a fast and a slow part.
936 * For this reason clk_prepare and clk_enable are not mutually
937 * exclusive. In fact clk_prepare must be called before clk_enable.
938 * Returns 0 on success, a negative error code otherwise.
939 */
940 int clk_prepare(struct clk *clk)
941 {
942 if (!clk)
943 return 0;
944
945 return clk_core_prepare_lock(clk->core);
946 }
947 EXPORT_SYMBOL_GPL(clk_prepare);
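/*
 * Illustrative sketch (not part of this file): clk_prepare() must precede
 * clk_enable() and, symmetrically, clk_disable() must precede
 * clk_unprepare(). The helpers below are hypothetical consumer code.
 */
#if 0
static int example_clk_on(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* may sleep, e.g. for an I2C-attached clk */
	if (ret)
		return ret;

	ret = clk_enable(clk);		/* must not sleep */
	if (ret)
		clk_unprepare(clk);

	return ret;
}

static void example_clk_off(struct clk *clk)
{
	clk_disable(clk);		/* fast, non-sleeping part first */
	clk_unprepare(clk);		/* then the part that may sleep */
}
#endif
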
948
949 static void clk_core_disable(struct clk_core *core)
950 {
951 lockdep_assert_held(&enable_lock);
952
953 if (!core)
954 return;
955
956 if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
957 return;
958
959 if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
960 "Disabling critical %s\n", core->name))
961 return;
962
963 if (--core->enable_count > 0)
964 return;
965
966 trace_clk_disable_rcuidle(core);
967
968 if (core->ops->disable)
969 core->ops->disable(core->hw);
970
971 trace_clk_disable_complete_rcuidle(core);
972
973 clk_core_disable(core->parent);
974 }
975
976 static void clk_core_disable_lock(struct clk_core *core)
977 {
978 unsigned long flags;
979
980 flags = clk_enable_lock();
981 clk_core_disable(core);
982 clk_enable_unlock(flags);
983 }
984
985 /**
986 * clk_disable - gate a clock
987 * @clk: the clk being gated
988 *
989 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
990 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
991 * clk if the operation is fast and will never sleep. One example is a
992 * SoC-internal clk which is controlled via simple register writes. In the
993 * complex case a clk gate operation may require a fast and a slow part. For
994 * this reason clk_unprepare and clk_disable are not mutually exclusive.
995 * In fact clk_disable must be called before clk_unprepare.
996 */
997 void clk_disable(struct clk *clk)
998 {
999 if (IS_ERR_OR_NULL(clk))
1000 return;
1001
1002 clk_core_disable_lock(clk->core);
1003 }
1004 EXPORT_SYMBOL_GPL(clk_disable);
1005
1006 static int clk_core_enable(struct clk_core *core)
1007 {
1008 int ret = 0;
1009
1010 lockdep_assert_held(&enable_lock);
1011
1012 if (!core)
1013 return 0;
1014
1015 if (WARN(core->prepare_count == 0,
1016 "Enabling unprepared %s\n", core->name))
1017 return -ESHUTDOWN;
1018
1019 if (core->enable_count == 0) {
1020 ret = clk_core_enable(core->parent);
1021
1022 if (ret)
1023 return ret;
1024
1025 trace_clk_enable_rcuidle(core);
1026
1027 if (core->ops->enable)
1028 ret = core->ops->enable(core->hw);
1029
1030 trace_clk_enable_complete_rcuidle(core);
1031
1032 if (ret) {
1033 clk_core_disable(core->parent);
1034 return ret;
1035 }
1036 }
1037
1038 core->enable_count++;
1039 return 0;
1040 }
1041
1042 static int clk_core_enable_lock(struct clk_core *core)
1043 {
1044 unsigned long flags;
1045 int ret;
1046
1047 flags = clk_enable_lock();
1048 ret = clk_core_enable(core);
1049 clk_enable_unlock(flags);
1050
1051 return ret;
1052 }
1053
1054 /**
1055 * clk_gate_restore_context - restore context for poweroff
1056 * @hw: the clk_hw pointer of clock whose state is to be restored
1057 *
1058 * The clock gate restore context function enables or disables
1059 * the gate clock based on its enable_count. This is used in cases
1060 * where the clock context is lost over a power transition and the
1061 * gate must be returned to its pre-power-off state. This
1062 * helps restore the state of gate clocks.
1063 */
1064 void clk_gate_restore_context(struct clk_hw *hw)
1065 {
1066 struct clk_core *core = hw->core;
1067
1068 if (core->enable_count)
1069 core->ops->enable(hw);
1070 else
1071 core->ops->disable(hw);
1072 }
1073 EXPORT_SYMBOL_GPL(clk_gate_restore_context);
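/*
 * Illustrative sketch (not part of this file): a gate-type provider whose
 * registers are lost across a power-off can use this helper as its
 * .restore_context callback. The enable/disable callbacks are hypothetical.
 */
#if 0
static const struct clk_ops example_gate_ops = {
	.enable		 = example_gate_enable,		/* hypothetical */
	.disable	 = example_gate_disable,	/* hypothetical */
	.restore_context = clk_gate_restore_context,
};
#endif
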
1074
1075 static int clk_core_save_context(struct clk_core *core)
1076 {
1077 struct clk_core *child;
1078 int ret = 0;
1079
1080 hlist_for_each_entry(child, &core->children, child_node) {
1081 ret = clk_core_save_context(child);
1082 if (ret < 0)
1083 return ret;
1084 }
1085
1086 if (core->ops && core->ops->save_context)
1087 ret = core->ops->save_context(core->hw);
1088
1089 return ret;
1090 }
1091
1092 static void clk_core_restore_context(struct clk_core *core)
1093 {
1094 struct clk_core *child;
1095
1096 if (core->ops && core->ops->restore_context)
1097 core->ops->restore_context(core->hw);
1098
1099 hlist_for_each_entry(child, &core->children, child_node)
1100 clk_core_restore_context(child);
1101 }
1102
1103 /**
1104 * clk_save_context - save clock context for poweroff
1105 *
1106 * Saves the context of the clock registers for power states in which the
1107 * contents of the registers will be lost. Occurs deep within the suspend
1108 * code. Returns 0 on success.
1109 */
1110 int clk_save_context(void)
1111 {
1112 struct clk_core *clk;
1113 int ret;
1114
1115 hlist_for_each_entry(clk, &clk_root_list, child_node) {
1116 ret = clk_core_save_context(clk);
1117 if (ret < 0)
1118 return ret;
1119 }
1120
1121 hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
1122 ret = clk_core_save_context(clk);
1123 if (ret < 0)
1124 return ret;
1125 }
1126
1127 return 0;
1128 }
1129 EXPORT_SYMBOL_GPL(clk_save_context);
1130
1131 /**
1132 * clk_restore_context - restore clock context after poweroff
1133 *
1134 * Restore the saved clock context upon resume.
1135 *
1136 */
1137 void clk_restore_context(void)
1138 {
1139 struct clk_core *core;
1140
1141 hlist_for_each_entry(core, &clk_root_list, child_node)
1142 clk_core_restore_context(core);
1143
1144 hlist_for_each_entry(core, &clk_orphan_list, child_node)
1145 clk_core_restore_context(core);
1146 }
1147 EXPORT_SYMBOL_GPL(clk_restore_context);
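/*
 * Illustrative sketch (not part of this file): platform code that powers the
 * clock controller down in deep sleep can wrap these helpers in syscore ops
 * (an assumed pattern; requires <linux/syscore_ops.h>, names are made up).
 */
#if 0
static int example_clk_suspend(void)
{
	return clk_save_context();
}

static void example_clk_resume(void)
{
	clk_restore_context();
}

static struct syscore_ops example_clk_syscore_ops = {
	.suspend = example_clk_suspend,
	.resume	 = example_clk_resume,
};
/* register_syscore_ops(&example_clk_syscore_ops); */
#endif
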
1148
1149 /**
1150 * clk_enable - ungate a clock
1151 * @clk: the clk being ungated
1152 *
1153 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
1154 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
1155 * if the operation will never sleep. One example is a SoC-internal clk which
1156 * is controlled via simple register writes. In the complex case a clk ungate
1157 * operation may require a fast and a slow part. For this reason
1158 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
1159 * must be called before clk_enable. Returns 0 on success, a negative error
1160 * code otherwise.
1161 */
1162 int clk_enable(struct clk *clk)
1163 {
1164 if (!clk)
1165 return 0;
1166
1167 return clk_core_enable_lock(clk->core);
1168 }
1169 EXPORT_SYMBOL_GPL(clk_enable);
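/*
 * Illustrative sketch (not part of this file): because clk_enable() and
 * clk_disable() never sleep, a driver can prepare the clk once in process
 * context and then gate/ungate it from atomic paths. Names are made up.
 */
#if 0
static int example_probe_time(struct clk *clk)
{
	return clk_prepare(clk);	/* sleepable context, done once */
}

static void example_fast_path(struct clk *clk)
{
	if (clk_enable(clk))		/* safe in atomic context */
		return;

	/* ... brief register access while the clk is running ... */

	clk_disable(clk);		/* also safe in atomic context */
}
#endif
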
1170
1171 static int clk_core_prepare_enable(struct clk_core *core)
1172 {
1173 int ret;
1174
1175 ret = clk_core_prepare_lock(core);
1176 if (ret)
1177 return ret;
1178
1179 ret = clk_core_enable_lock(core);
1180 if (ret)
1181 clk_core_unprepare_lock(core);
1182
1183 return ret;
1184 }
1185
1186 static void clk_core_disable_unprepare(struct clk_core *core)
1187 {
1188 clk_core_disable_lock(core);
1189 clk_core_unprepare_lock(core);
1190 }
1191
1192 static void clk_unprepare_unused_subtree(struct clk_core *core)
1193 {
1194 struct clk_core *child;
1195
1196 lockdep_assert_held(&prepare_lock);
1197
1198 hlist_for_each_entry(child, &core->children, child_node)
1199 clk_unprepare_unused_subtree(child);
1200
1201 if (dev_has_sync_state(core->dev) &&
1202 !(core->flags & CLK_DONT_HOLD_STATE))
1203 return;
1204
1205 if (core->prepare_count)
1206 return;
1207
1208 if (core->flags & CLK_IGNORE_UNUSED)
1209 return;
1210
1211 if (clk_pm_runtime_get(core))
1212 return;
1213
1214 if (clk_core_is_prepared(core)) {
1215 trace_clk_unprepare(core);
1216 if (core->ops->unprepare_unused)
1217 core->ops->unprepare_unused(core->hw);
1218 else if (core->ops->unprepare)
1219 core->ops->unprepare(core->hw);
1220 trace_clk_unprepare_complete(core);
1221 }
1222
1223 clk_pm_runtime_put(core);
1224 }
1225
1226 static void clk_disable_unused_subtree(struct clk_core *core)
1227 {
1228 struct clk_core *child;
1229 unsigned long flags;
1230
1231 lockdep_assert_held(&prepare_lock);
1232
1233 hlist_for_each_entry(child, &core->children, child_node)
1234 clk_disable_unused_subtree(child);
1235
1236 if (dev_has_sync_state(core->dev) &&
1237 !(core->flags & CLK_DONT_HOLD_STATE))
1238 return;
1239
1240 if (core->flags & CLK_OPS_PARENT_ENABLE)
1241 clk_core_prepare_enable(core->parent);
1242
1243 if (clk_pm_runtime_get(core))
1244 goto unprepare_out;
1245
1246 flags = clk_enable_lock();
1247
1248 if (core->enable_count)
1249 goto unlock_out;
1250
1251 if (core->flags & CLK_IGNORE_UNUSED)
1252 goto unlock_out;
1253
1254 /*
1255 * some gate clocks have special needs during the disable-unused
1256 * sequence. Call .disable_unused if available, otherwise fall
1257 * back to .disable
1258 */
1259 if (clk_core_is_enabled(core)) {
1260 trace_clk_disable(core);
1261 if (core->ops->disable_unused)
1262 core->ops->disable_unused(core->hw);
1263 else if (core->ops->disable)
1264 core->ops->disable(core->hw);
1265 trace_clk_disable_complete(core);
1266 }
1267
1268 unlock_out:
1269 clk_enable_unlock(flags);
1270 clk_pm_runtime_put(core);
1271 unprepare_out:
1272 if (core->flags & CLK_OPS_PARENT_ENABLE)
1273 clk_core_disable_unprepare(core->parent);
1274 }
1275
1276 static bool clk_ignore_unused;
1277 static int __init clk_ignore_unused_setup(char *__unused)
1278 {
1279 clk_ignore_unused = true;
1280 return 1;
1281 }
1282 __setup("clk_ignore_unused", clk_ignore_unused_setup);
1283
1284 static int clk_disable_unused(void)
1285 {
1286 struct clk_core *core;
1287
1288 if (clk_ignore_unused) {
1289 pr_warn("clk: Not disabling unused clocks\n");
1290 return 0;
1291 }
1292
1293 clk_prepare_lock();
1294
1295 hlist_for_each_entry(core, &clk_root_list, child_node)
1296 clk_disable_unused_subtree(core);
1297
1298 hlist_for_each_entry(core, &clk_orphan_list, child_node)
1299 clk_disable_unused_subtree(core);
1300
1301 hlist_for_each_entry(core, &clk_root_list, child_node)
1302 clk_unprepare_unused_subtree(core);
1303
1304 hlist_for_each_entry(core, &clk_orphan_list, child_node)
1305 clk_unprepare_unused_subtree(core);
1306
1307 clk_prepare_unlock();
1308
1309 return 0;
1310 }
1311 late_initcall_sync(clk_disable_unused);
1312
1313 static void clk_unprepare_disable_dev_subtree(struct clk_core *core,
1314 struct device *dev)
1315 {
1316 struct clk_core *child;
1317
1318 lockdep_assert_held(&prepare_lock);
1319
1320 hlist_for_each_entry(child, &core->children, child_node)
1321 clk_unprepare_disable_dev_subtree(child, dev);
1322
1323 if (core->dev != dev || !core->need_sync)
1324 return;
1325
1326 clk_core_disable_unprepare(core);
1327 }
1328
1329 void clk_sync_state(struct device *dev)
1330 {
1331 struct clk_core *core;
1332
1333 clk_prepare_lock();
1334
1335 hlist_for_each_entry(core, &clk_root_list, child_node)
1336 clk_unprepare_disable_dev_subtree(core, dev);
1337
1338 hlist_for_each_entry(core, &clk_orphan_list, child_node)
1339 clk_unprepare_disable_dev_subtree(core, dev);
1340
1341 clk_prepare_unlock();
1342 }
1343 EXPORT_SYMBOL_GPL(clk_sync_state);
1344
1345 static int clk_core_determine_round_nolock(struct clk_core *core,
1346 struct clk_rate_request *req)
1347 {
1348 long rate;
1349
1350 lockdep_assert_held(&prepare_lock);
1351
1352 if (!core)
1353 return 0;
1354
1355 /*
1356 * At this point, core protection will be disabled:
1357 * - if the provider is not protected at all
1358 * - if the calling consumer is the only one which has exclusivity
1359 * over the provider
1360 */
1361 if (clk_core_rate_is_protected(core)) {
1362 req->rate = core->rate;
1363 } else if (core->ops->determine_rate) {
1364 return core->ops->determine_rate(core->hw, req);
1365 } else if (core->ops->round_rate) {
1366 rate = core->ops->round_rate(core->hw, req->rate,
1367 &req->best_parent_rate);
1368 if (rate < 0)
1369 return rate;
1370
1371 req->rate = rate;
1372 } else {
1373 return -EINVAL;
1374 }
1375
1376 return 0;
1377 }
1378
1379 static void clk_core_init_rate_req(struct clk_core * const core,
1380 struct clk_rate_request *req)
1381 {
1382 struct clk_core *parent;
1383
1384 if (WARN_ON(!core || !req))
1385 return;
1386
1387 parent = core->parent;
1388 if (parent) {
1389 req->best_parent_hw = parent->hw;
1390 req->best_parent_rate = parent->rate;
1391 } else {
1392 req->best_parent_hw = NULL;
1393 req->best_parent_rate = 0;
1394 }
1395 }
1396
1397 static bool clk_core_can_round(struct clk_core * const core)
1398 {
1399 return core->ops->determine_rate || core->ops->round_rate;
1400 }
1401
1402 static int clk_core_round_rate_nolock(struct clk_core *core,
1403 struct clk_rate_request *req)
1404 {
1405 lockdep_assert_held(&prepare_lock);
1406
1407 if (!core) {
1408 req->rate = 0;
1409 return 0;
1410 }
1411
1412 clk_core_init_rate_req(core, req);
1413
1414 if (clk_core_can_round(core))
1415 return clk_core_determine_round_nolock(core, req);
1416 else if (core->flags & CLK_SET_RATE_PARENT)
1417 return clk_core_round_rate_nolock(core->parent, req);
1418
1419 req->rate = core->rate;
1420 return 0;
1421 }
1422
1423 /**
1424 * __clk_determine_rate - get the closest rate actually supported by a clock
1425 * @hw: determine the rate of this clock
1426 * @req: target rate request
1427 *
1428 * Useful for clk_ops such as .set_rate and .determine_rate.
1429 */
1430 int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
1431 {
1432 if (!hw) {
1433 req->rate = 0;
1434 return 0;
1435 }
1436
1437 return clk_core_round_rate_nolock(hw->core, req);
1438 }
1439 EXPORT_SYMBOL_GPL(__clk_determine_rate);
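/*
 * Illustrative sketch (not part of this file): a provider's .determine_rate
 * for a hypothetical fixed divide-by-2 clock can use __clk_determine_rate()
 * to ask its parent for the closest achievable source rate (min/max
 * propagation omitted for brevity).
 */
#if 0
static int example_div2_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = *req;
	int ret;

	parent_req.rate = req->rate * 2;	/* rate needed from the parent */
	ret = __clk_determine_rate(clk_hw_get_parent(hw), &parent_req);
	if (ret)
		return ret;

	req->best_parent_rate = parent_req.rate;
	req->rate = parent_req.rate / 2;

	return 0;
}
#endif
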
1440
1441 unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
1442 {
1443 int ret;
1444 struct clk_rate_request req;
1445
1446 clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
1447 req.rate = rate;
1448
1449 ret = clk_core_round_rate_nolock(hw->core, &req);
1450 if (ret)
1451 return 0;
1452
1453 return req.rate;
1454 }
1455 EXPORT_SYMBOL_GPL(clk_hw_round_rate);
1456
1457 /**
1458 * clk_round_rate - round the given rate for a clk
1459 * @clk: the clk for which we are rounding a rate
1460 * @rate: the rate which is to be rounded
1461 *
1462 * Takes in a rate as input and rounds it to a rate that the clk can actually
1463 * use, which is then returned. If the clk doesn't support the round_rate
1464 * operation then the parent rate is returned.
1465 */
1466 long clk_round_rate(struct clk *clk, unsigned long rate)
1467 {
1468 struct clk_rate_request req;
1469 int ret;
1470
1471 if (!clk)
1472 return 0;
1473
1474 clk_prepare_lock();
1475
1476 if (clk->exclusive_count)
1477 clk_core_rate_unprotect(clk->core);
1478
1479 clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
1480 req.rate = rate;
1481
1482 ret = clk_core_round_rate_nolock(clk->core, &req);
1483
1484 if (clk->exclusive_count)
1485 clk_core_rate_protect(clk->core);
1486
1487 clk_prepare_unlock();
1488
1489 if (ret)
1490 return ret;
1491
1492 return req.rate;
1493 }
1494 EXPORT_SYMBOL_GPL(clk_round_rate);
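/*
 * Illustrative sketch (not part of this file): a consumer can check what the
 * clk would actually deliver before committing to a rate. The target rate
 * and tolerance are made up.
 */
#if 0
static int example_set_close_rate(struct clk *clk)
{
	long rounded;

	rounded = clk_round_rate(clk, 48000000);
	if (rounded <= 0)
		return -EINVAL;

	if (abs(rounded - 48000000) > 1000000)
		return -ERANGE;		/* hardware can't get close enough */

	return clk_set_rate(clk, rounded);
}
#endif
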
1495
1496 /**
1497 * __clk_notify - call clk notifier chain
1498 * @core: clk that is changing rate
1499 * @msg: clk notifier type (see include/linux/clk.h)
1500 * @old_rate: old clk rate
1501 * @new_rate: new clk rate
1502 *
1503 * Triggers a notifier call chain on the clk rate-change notification
1504 * for 'clk'. Passes a pointer to the struct clk and the previous
1505 * and current rates to the notifier callback. Intended to be called by
1506 * internal clock code only. Returns NOTIFY_DONE from the last driver
1507 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
1508 * a driver returns that.
1509 */
1510 static int __clk_notify(struct clk_core *core, unsigned long msg,
1511 unsigned long old_rate, unsigned long new_rate)
1512 {
1513 struct clk_notifier *cn;
1514 struct clk_notifier_data cnd;
1515 int ret = NOTIFY_DONE;
1516
1517 cnd.old_rate = old_rate;
1518 cnd.new_rate = new_rate;
1519
1520 list_for_each_entry(cn, &clk_notifier_list, node) {
1521 if (cn->clk->core == core) {
1522 cnd.clk = cn->clk;
1523 ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
1524 &cnd);
1525 if (ret & NOTIFY_STOP_MASK)
1526 return ret;
1527 }
1528 }
1529
1530 return ret;
1531 }
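/*
 * Illustrative sketch (not part of this file): a consumer that must react to
 * rate changes registers a notifier with clk_notifier_register(); its
 * callback then receives the events sent by __clk_notify(). The rate limits
 * here are made up.
 */
#if 0
static int example_clk_notifier_cb(struct notifier_block *nb,
				   unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;

	if (event == PRE_RATE_CHANGE && ndata->new_rate > 200000000)
		return NOTIFY_BAD;	/* veto rates this consumer can't handle */

	if (event == POST_RATE_CHANGE)
		pr_debug("rate changed: %lu -> %lu\n",
			 ndata->old_rate, ndata->new_rate);

	return NOTIFY_OK;
}
/* at probe time: nb->notifier_call = example_clk_notifier_cb;
 *                clk_notifier_register(clk, nb); */
#endif
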
1532
1533 /**
1534 * __clk_recalc_accuracies
1535 * @core: first clk in the subtree
1536 *
1537 * Walks the subtree of clks starting with clk and recalculates accuracies as
1538 * it goes. Note that if a clk does not implement the .recalc_accuracy
1539 * callback then it is assumed that the clock will take on the accuracy of its
1540 * parent.
1541 */
1542 static void __clk_recalc_accuracies(struct clk_core *core)
1543 {
1544 unsigned long parent_accuracy = 0;
1545 struct clk_core *child;
1546
1547 lockdep_assert_held(&prepare_lock);
1548
1549 if (core->parent)
1550 parent_accuracy = core->parent->accuracy;
1551
1552 if (core->ops->recalc_accuracy)
1553 core->accuracy = core->ops->recalc_accuracy(core->hw,
1554 parent_accuracy);
1555 else
1556 core->accuracy = parent_accuracy;
1557
1558 hlist_for_each_entry(child, &core->children, child_node)
1559 __clk_recalc_accuracies(child);
1560 }
1561
1562 static long clk_core_get_accuracy(struct clk_core *core)
1563 {
1564 unsigned long accuracy;
1565
1566 clk_prepare_lock();
1567 if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
1568 __clk_recalc_accuracies(core);
1569
1570 accuracy = __clk_get_accuracy(core);
1571 clk_prepare_unlock();
1572
1573 return accuracy;
1574 }
1575
1576 /**
1577 * clk_get_accuracy - return the accuracy of clk
1578 * @clk: the clk whose accuracy is being returned
1579 *
1580 * Simply returns the cached accuracy of the clk, unless the
1581 * CLK_GET_ACCURACY_NOCACHE flag is set, in which case the accuracy is
1582 * recalculated.
1583 * If clk is NULL then returns 0.
1584 */
1585 long clk_get_accuracy(struct clk *clk)
1586 {
1587 if (!clk)
1588 return 0;
1589
1590 return clk_core_get_accuracy(clk->core);
1591 }
1592 EXPORT_SYMBOL_GPL(clk_get_accuracy);
1593
1594 static unsigned long clk_recalc(struct clk_core *core,
1595 unsigned long parent_rate)
1596 {
1597 unsigned long rate = parent_rate;
1598
1599 if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
1600 rate = core->ops->recalc_rate(core->hw, parent_rate);
1601 clk_pm_runtime_put(core);
1602 }
1603 return rate;
1604 }
1605
1606 /**
1607 * __clk_recalc_rates
1608 * @core: first clk in the subtree
1609 * @msg: notification type (see include/linux/clk.h)
1610 *
1611 * Walks the subtree of clks starting with clk and recalculates rates as it
1612 * goes. Note that if a clk does not implement the .recalc_rate callback then
1613 * it is assumed that the clock will take on the rate of its parent.
1614 *
1615 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
1616 * if necessary.
1617 */
1618 static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
1619 {
1620 unsigned long old_rate;
1621 unsigned long parent_rate = 0;
1622 struct clk_core *child;
1623
1624 lockdep_assert_held(&prepare_lock);
1625
1626 old_rate = core->rate;
1627
1628 if (core->parent)
1629 parent_rate = core->parent->rate;
1630
1631 core->rate = clk_recalc(core, parent_rate);
1632
1633 /*
1634 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
1635 * & ABORT_RATE_CHANGE notifiers
1636 */
1637 if (core->notifier_count && msg)
1638 __clk_notify(core, msg, old_rate, core->rate);
1639
1640 hlist_for_each_entry(child, &core->children, child_node)
1641 __clk_recalc_rates(child, msg);
1642 }
1643
1644 static unsigned long clk_core_get_rate(struct clk_core *core)
1645 {
1646 unsigned long rate;
1647
1648 clk_prepare_lock();
1649
1650 if (core && (core->flags & CLK_GET_RATE_NOCACHE))
1651 __clk_recalc_rates(core, 0);
1652
1653 rate = clk_core_get_rate_nolock(core);
1654 clk_prepare_unlock();
1655
1656 return rate;
1657 }
1658
1659 /**
1660 * clk_get_rate - return the rate of clk
1661 * @clk: the clk whose rate is being returned
1662 *
1663 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1664 * is set, which means a recalc_rate will be issued.
1665 * If clk is NULL then returns 0.
1666 */
1667 unsigned long clk_get_rate(struct clk *clk)
1668 {
1669 if (!clk)
1670 return 0;
1671
1672 return clk_core_get_rate(clk->core);
1673 }
1674 EXPORT_SYMBOL_GPL(clk_get_rate);
1675
1676 static int clk_fetch_parent_index(struct clk_core *core,
1677 struct clk_core *parent)
1678 {
1679 int i;
1680
1681 if (!parent)
1682 return -EINVAL;
1683
1684 for (i = 0; i < core->num_parents; i++) {
1685 /* Found it first try! */
1686 if (core->parents[i].core == parent)
1687 return i;
1688
1689 /* Something else is here, so keep looking */
1690 if (core->parents[i].core)
1691 continue;
1692
1693 /* Maybe core hasn't been cached but the hw is all we know? */
1694 if (core->parents[i].hw) {
1695 if (core->parents[i].hw == parent->hw)
1696 break;
1697
1698 /* Didn't match, but we're expecting a clk_hw */
1699 continue;
1700 }
1701
1702 /* Maybe it hasn't been cached (clk_set_parent() path) */
1703 if (parent == clk_core_get(core, i))
1704 break;
1705
1706 /* Fallback to comparing globally unique names */
1707 if (core->parents[i].name &&
1708 !strcmp(parent->name, core->parents[i].name))
1709 break;
1710 }
1711
1712 if (i == core->num_parents)
1713 return -EINVAL;
1714
1715 core->parents[i].core = parent;
1716 return i;
1717 }
1718
1719 static void clk_core_hold_state(struct clk_core *core)
1720 {
1721 if (core->need_sync || !core->boot_enabled)
1722 return;
1723
1724 if (core->orphan || !dev_has_sync_state(core->dev))
1725 return;
1726
1727 if (core->flags & CLK_DONT_HOLD_STATE)
1728 return;
1729
1730 core->need_sync = !clk_core_prepare_enable(core);
1731 }
1732
1733 static void __clk_core_update_orphan_hold_state(struct clk_core *core)
1734 {
1735 struct clk_core *child;
1736
1737 if (core->orphan)
1738 return;
1739
1740 clk_core_hold_state(core);
1741
1742 hlist_for_each_entry(child, &core->children, child_node)
1743 __clk_core_update_orphan_hold_state(child);
1744 }
1745
1746 /*
1747 * Update the orphan status of @core and all its children.
1748 */
1749 static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
1750 {
1751 struct clk_core *child;
1752
1753 core->orphan = is_orphan;
1754
1755 hlist_for_each_entry(child, &core->children, child_node)
1756 clk_core_update_orphan_status(child, is_orphan);
1757 }
1758
1759 static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
1760 {
1761 bool was_orphan = core->orphan;
1762
1763 hlist_del(&core->child_node);
1764
1765 if (new_parent) {
1766 bool becomes_orphan = new_parent->orphan;
1767
1768 /* avoid duplicate POST_RATE_CHANGE notifications */
1769 if (new_parent->new_child == core)
1770 new_parent->new_child = NULL;
1771
1772 hlist_add_head(&core->child_node, &new_parent->children);
1773
1774 if (was_orphan != becomes_orphan)
1775 clk_core_update_orphan_status(core, becomes_orphan);
1776 } else {
1777 hlist_add_head(&core->child_node, &clk_orphan_list);
1778 if (!was_orphan)
1779 clk_core_update_orphan_status(core, true);
1780 }
1781
1782 core->parent = new_parent;
1783 }
1784
1785 static struct clk_core *__clk_set_parent_before(struct clk_core *core,
1786 struct clk_core *parent)
1787 {
1788 unsigned long flags;
1789 struct clk_core *old_parent = core->parent;
1790
1791 /*
1792 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
1793 *
1794 * 2. Migrate prepare state between parents and prevent race with
1795 * clk_enable().
1796 *
1797 * If the clock is not prepared, then a race with
1798 * clk_enable/disable() is impossible since we already have the
1799 * prepare lock (future calls to clk_enable() need to be preceded by
1800 * a clk_prepare()).
1801 *
1802 * If the clock is prepared, migrate the prepared state to the new
1803 * parent and also protect against a race with clk_enable() by
1804 * forcing the clock and the new parent on. This ensures that all
1805 * future calls to clk_enable() are practically NOPs with respect to
1806 * hardware and software states.
1807 *
1808 * See also: Comment for clk_set_parent() below.
1809 */
1810
1811 /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
1812 if (core->flags & CLK_OPS_PARENT_ENABLE) {
1813 clk_core_prepare_enable(old_parent);
1814 clk_core_prepare_enable(parent);
1815 }
1816
1817 /* migrate prepare count if > 0 */
1818 if (core->prepare_count) {
1819 clk_core_prepare_enable(parent);
1820 clk_core_enable_lock(core);
1821 }
1822
1823 /* update the clk tree topology */
1824 flags = clk_enable_lock();
1825 clk_reparent(core, parent);
1826 clk_enable_unlock(flags);
1827
1828 return old_parent;
1829 }
1830
1831 static void __clk_set_parent_after(struct clk_core *core,
1832 struct clk_core *parent,
1833 struct clk_core *old_parent)
1834 {
1835 /*
1836 * Finish the migration of prepare state and undo the changes done
1837 * for preventing a race with clk_enable().
1838 */
1839 if (core->prepare_count) {
1840 clk_core_disable_lock(core);
1841 clk_core_disable_unprepare(old_parent);
1842 }
1843
1844 /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
1845 if (core->flags & CLK_OPS_PARENT_ENABLE) {
1846 clk_core_disable_unprepare(parent);
1847 clk_core_disable_unprepare(old_parent);
1848 }
1849 }
1850
1851 static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
1852 u8 p_index)
1853 {
1854 unsigned long flags;
1855 int ret = 0;
1856 struct clk_core *old_parent;
1857
1858 old_parent = __clk_set_parent_before(core, parent);
1859
1860 trace_clk_set_parent(core, parent);
1861
1862 /* change clock input source */
1863 if (parent && core->ops->set_parent)
1864 ret = core->ops->set_parent(core->hw, p_index);
1865
1866 trace_clk_set_parent_complete(core, parent);
1867
1868 if (ret) {
1869 flags = clk_enable_lock();
1870 clk_reparent(core, old_parent);
1871 clk_enable_unlock(flags);
1872 __clk_set_parent_after(core, old_parent, parent);
1873
1874 return ret;
1875 }
1876
1877 __clk_set_parent_after(core, parent, old_parent);
1878
1879 return 0;
1880 }
1881
1882 /**
1883 * __clk_speculate_rates
1884 * @core: first clk in the subtree
1885 * @parent_rate: the "future" rate of clk's parent
1886 *
1887 * Walks the subtree of clks starting with clk, speculating rates as it
1888 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1889 *
1890 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
1891 * pre-rate change notifications and returns early if no clks in the
1892 * subtree have subscribed to the notifications. Note that if a clk does not
1893 * implement the .recalc_rate callback then it is assumed that the clock will
1894 * take on the rate of its parent.
1895 */
1896 static int __clk_speculate_rates(struct clk_core *core,
1897 unsigned long parent_rate)
1898 {
1899 struct clk_core *child;
1900 unsigned long new_rate;
1901 int ret = NOTIFY_DONE;
1902
1903 lockdep_assert_held(&prepare_lock);
1904
1905 new_rate = clk_recalc(core, parent_rate);
1906
1907 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
1908 if (core->notifier_count)
1909 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
1910
1911 if (ret & NOTIFY_STOP_MASK) {
1912 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1913 __func__, core->name, ret);
1914 goto out;
1915 }
1916
1917 hlist_for_each_entry(child, &core->children, child_node) {
1918 ret = __clk_speculate_rates(child, new_rate);
1919 if (ret & NOTIFY_STOP_MASK)
1920 break;
1921 }
1922
1923 out:
1924 return ret;
1925 }
1926
1927 static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
1928 struct clk_core *new_parent, u8 p_index)
1929 {
1930 struct clk_core *child;
1931
1932 core->new_rate = new_rate;
1933 core->new_parent = new_parent;
1934 core->new_parent_index = p_index;
1935 /* include clk in new parent's PRE_RATE_CHANGE notifications */
1936 core->new_child = NULL;
1937 if (new_parent && new_parent != core->parent)
1938 new_parent->new_child = core;
1939
1940 hlist_for_each_entry(child, &core->children, child_node) {
1941 child->new_rate = clk_recalc(child, new_rate);
1942 clk_calc_subtree(child, child->new_rate, NULL, 0);
1943 }
1944 }
1945
1946 /*
1947 * calculate the new rates returning the topmost clock that has to be
1948 * changed.
1949 */
1950 static struct clk_core *clk_calc_new_rates(struct clk_core *core,
1951 unsigned long rate)
1952 {
1953 struct clk_core *top = core;
1954 struct clk_core *old_parent, *parent;
1955 unsigned long best_parent_rate = 0;
1956 unsigned long new_rate;
1957 unsigned long min_rate;
1958 unsigned long max_rate;
1959 int p_index = 0;
1960 long ret;
1961
1962 /* sanity */
1963 if (IS_ERR_OR_NULL(core))
1964 return NULL;
1965
1966 /* save parent rate, if it exists */
1967 parent = old_parent = core->parent;
1968 if (parent)
1969 best_parent_rate = parent->rate;
1970
1971 clk_core_get_boundaries(core, &min_rate, &max_rate);
1972
1973 /* find the closest rate and parent clk/rate */
1974 if (clk_core_can_round(core)) {
1975 struct clk_rate_request req;
1976
1977 req.rate = rate;
1978 req.min_rate = min_rate;
1979 req.max_rate = max_rate;
1980
1981 clk_core_init_rate_req(core, &req);
1982
1983 ret = clk_core_determine_round_nolock(core, &req);
1984 if (ret < 0)
1985 return NULL;
1986
1987 best_parent_rate = req.best_parent_rate;
1988 new_rate = req.rate;
1989 parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
1990
1991 if (new_rate < min_rate || new_rate > max_rate)
1992 return NULL;
1993 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
1994 /* pass-through clock without adjustable parent */
1995 core->new_rate = core->rate;
1996 return NULL;
1997 } else {
1998 /* pass-through clock with adjustable parent */
1999 top = clk_calc_new_rates(parent, rate);
2000 new_rate = parent->new_rate;
2001 goto out;
2002 }
2003
2004 /* some clocks must be gated to change parent */
2005 if (parent != old_parent &&
2006 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
2007 pr_debug("%s: %s not gated but wants to reparent\n",
2008 __func__, core->name);
2009 return NULL;
2010 }
2011
2012 /* try finding the new parent index */
2013 if (parent && core->num_parents > 1) {
2014 p_index = clk_fetch_parent_index(core, parent);
2015 if (p_index < 0) {
2016 pr_debug("%s: clk %s can not be parent of clk %s\n",
2017 __func__, parent->name, core->name);
2018 return NULL;
2019 }
2020 }
2021
2022 if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
2023 best_parent_rate != parent->rate)
2024 top = clk_calc_new_rates(parent, best_parent_rate);
2025
2026 out:
2027 clk_calc_subtree(core, new_rate, parent, p_index);
2028
2029 return top;
2030 }
2031
2032 /*
2033 * Notify about rate changes in a subtree. Always walk down the whole tree
2034 * so that in case of an error we can walk down the whole tree again and
2035 * abort the change.
2036 */
2037 static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
2038 unsigned long event)
2039 {
2040 struct clk_core *child, *tmp_clk, *fail_clk = NULL;
2041 int ret = NOTIFY_DONE;
2042
2043 if (core->rate == core->new_rate)
2044 return NULL;
2045
2046 if (core->notifier_count) {
2047 ret = __clk_notify(core, event, core->rate, core->new_rate);
2048 if (ret & NOTIFY_STOP_MASK)
2049 fail_clk = core;
2050 }
2051
2052 if (core->ops->pre_rate_change) {
2053 ret = core->ops->pre_rate_change(core->hw, core->rate,
2054 core->new_rate);
2055 if (ret)
2056 fail_clk = core;
2057 }
2058
2059 hlist_for_each_entry(child, &core->children, child_node) {
2060 /* Skip children who will be reparented to another clock */
2061 if (child->new_parent && child->new_parent != core)
2062 continue;
2063 tmp_clk = clk_propagate_rate_change(child, event);
2064 if (tmp_clk)
2065 fail_clk = tmp_clk;
2066 }
2067
2068 /* handle the new child who might not be in core->children yet */
2069 if (core->new_child) {
2070 tmp_clk = clk_propagate_rate_change(core->new_child, event);
2071 if (tmp_clk)
2072 fail_clk = tmp_clk;
2073 }
2074
2075 return fail_clk;
2076 }
2077
2078 /*
2079 * walk down a subtree and set the new rates notifying the rate
2080 * change on the way
2081 */
2082 static void clk_change_rate(struct clk_core *core)
2083 {
2084 struct clk_core *child;
2085 struct hlist_node *tmp;
2086 unsigned long old_rate;
2087 unsigned long best_parent_rate = 0;
2088 bool skip_set_rate = false;
2089 struct clk_core *old_parent;
2090 struct clk_core *parent = NULL;
2091
2092 old_rate = core->rate;
2093
2094 if (core->new_parent) {
2095 parent = core->new_parent;
2096 best_parent_rate = core->new_parent->rate;
2097 } else if (core->parent) {
2098 parent = core->parent;
2099 best_parent_rate = core->parent->rate;
2100 }
2101
2102 if (clk_pm_runtime_get(core))
2103 return;
2104
2105 if (core->flags & CLK_SET_RATE_UNGATE) {
2106 unsigned long flags;
2107
2108 clk_core_prepare(core);
2109 flags = clk_enable_lock();
2110 clk_core_enable(core);
2111 clk_enable_unlock(flags);
2112 }
2113
2114 if (core->new_parent && core->new_parent != core->parent) {
2115 old_parent = __clk_set_parent_before(core, core->new_parent);
2116 trace_clk_set_parent(core, core->new_parent);
2117
2118 if (core->ops->set_rate_and_parent) {
2119 skip_set_rate = true;
2120 core->ops->set_rate_and_parent(core->hw, core->new_rate,
2121 best_parent_rate,
2122 core->new_parent_index);
2123 } else if (core->ops->set_parent) {
2124 core->ops->set_parent(core->hw, core->new_parent_index);
2125 }
2126
2127 trace_clk_set_parent_complete(core, core->new_parent);
2128 __clk_set_parent_after(core, core->new_parent, old_parent);
2129 }
2130
2131 if (core->flags & CLK_OPS_PARENT_ENABLE)
2132 clk_core_prepare_enable(parent);
2133
2134 trace_clk_set_rate(core, core->new_rate);
2135
2136 if (!skip_set_rate && core->ops->set_rate)
2137 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
2138
2139 trace_clk_set_rate_complete(core, core->new_rate);
2140
2141 core->rate = clk_recalc(core, best_parent_rate);
2142
2143 if (core->flags & CLK_SET_RATE_UNGATE) {
2144 unsigned long flags;
2145
2146 flags = clk_enable_lock();
2147 clk_core_disable(core);
2148 clk_enable_unlock(flags);
2149 clk_core_unprepare(core);
2150 }
2151
2152 if (core->flags & CLK_OPS_PARENT_ENABLE)
2153 clk_core_disable_unprepare(parent);
2154
2155 if (core->notifier_count && old_rate != core->rate)
2156 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
2157
2158 if (core->flags & CLK_RECALC_NEW_RATES)
2159 (void)clk_calc_new_rates(core, core->new_rate);
2160
2161 if (core->ops->post_rate_change)
2162 core->ops->post_rate_change(core->hw, old_rate, core->rate);
2163
2164 /*
2165 * Use safe iteration, as change_rate can actually swap parents
2166 * for certain clock types.
2167 */
2168 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
2169 /* Skip children who will be reparented to another clock */
2170 if (child->new_parent && child->new_parent != core)
2171 continue;
2172 clk_change_rate(child);
2173 }
2174
2175 /* handle the new child who might not be in core->children yet */
2176 if (core->new_child)
2177 clk_change_rate(core->new_child);
2178
2179 clk_pm_runtime_put(core);
2180 }
2181
2182 static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
2183 unsigned long req_rate)
2184 {
2185 int ret, cnt;
2186 struct clk_rate_request req;
2187
2188 lockdep_assert_held(&prepare_lock);
2189
2190 if (!core)
2191 return 0;
2192
2193 /* simulate what the rate would be if it could be freely set */
2194 cnt = clk_core_rate_nuke_protect(core);
2195 if (cnt < 0)
2196 return cnt;
2197
2198 clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
2199 req.rate = req_rate;
2200
2201 ret = clk_core_round_rate_nolock(core, &req);
2202
2203 /* restore the protection */
2204 clk_core_rate_restore_protect(core, cnt);
2205
2206 return ret ? 0 : req.rate;
2207 }
2208
2209 static int clk_core_set_rate_nolock(struct clk_core *core,
2210 unsigned long req_rate)
2211 {
2212 struct clk_core *top, *fail_clk;
2213 unsigned long rate;
2214 int ret = 0;
2215
2216 if (!core)
2217 return 0;
2218
2219 rate = clk_core_req_round_rate_nolock(core, req_rate);
2220
2221 /* bail early if nothing to do */
2222 if (rate == clk_core_get_rate_nolock(core))
2223 return 0;
2224
2225 /* fail on a direct rate set of a protected provider */
2226 if (clk_core_rate_is_protected(core))
2227 return -EBUSY;
2228
2229 /* calculate new rates and get the topmost changed clock */
2230 top = clk_calc_new_rates(core, req_rate);
2231 if (!top)
2232 return -EINVAL;
2233
2234 ret = clk_pm_runtime_get(core);
2235 if (ret)
2236 return ret;
2237
2238 /* notify that we are about to change rates */
2239 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
2240 if (fail_clk) {
2241 pr_debug("%s: failed to set %s rate\n", __func__,
2242 fail_clk->name);
2243 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
2244 ret = -EBUSY;
2245 goto err;
2246 }
2247
2248 /* change the rates */
2249 clk_change_rate(top);
2250
2251 core->req_rate = req_rate;
2252 err:
2253 clk_pm_runtime_put(core);
2254
2255 return ret;
2256 }
2257
2258 /**
2259 * clk_set_rate - specify a new rate for clk
2260 * @clk: the clk whose rate is being changed
2261 * @rate: the new rate for clk
2262 *
2263 * In the simplest case clk_set_rate will only adjust the rate of clk.
2264 *
2265 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
2266 * propagate up to clk's parent; whether or not this happens depends on the
2267 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
2268 * after calling .round_rate then upstream parent propagation is ignored. If
2269 * *parent_rate comes back with a new rate for clk's parent then we propagate
2270 * up to clk's parent and set its rate. Upward propagation will continue
2271 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
2272 * .round_rate stops requesting changes to clk's parent_rate.
2273 *
2274 * Rate changes are accomplished via tree traversal that also recalculates the
2275 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
2276 *
2277 * Returns 0 on success, a negative error code otherwise.
2278 */
2279 int clk_set_rate(struct clk *clk, unsigned long rate)
2280 {
2281 int ret;
2282
2283 if (!clk)
2284 return 0;
2285
2286 /* prevent racing with updates to the clock topology */
2287 clk_prepare_lock();
2288
2289 if (clk->exclusive_count)
2290 clk_core_rate_unprotect(clk->core);
2291
2292 ret = clk_core_set_rate_nolock(clk->core, rate);
2293
2294 if (clk->exclusive_count)
2295 clk_core_rate_protect(clk->core);
2296
2297 clk_prepare_unlock();
2298
2299 return ret;
2300 }
2301 EXPORT_SYMBOL_GPL(clk_set_rate);
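
/*
 * Illustrative consumer usage (a minimal sketch, not part of this file;
 * "dev", the "baud" con_id and the 100 MHz target are hypothetical):
 *
 *	struct clk *my_clk = devm_clk_get(dev, "baud");
 *	int err;
 *
 *	if (IS_ERR(my_clk))
 *		return PTR_ERR(my_clk);
 *	err = clk_set_rate(my_clk, 100000000);
 *	if (err)
 *		dev_err(dev, "failed to set clk rate: %d\n", err);
 */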
2302
2303 /**
2304 * clk_set_rate_exclusive - specify a new rate and get exclusive control
2305 * @clk: the clk whose rate is being changed
2306 * @rate: the new rate for clk
2307 *
2308 * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
2309 * within a critical section
2310 *
2311 * This can be used initially to ensure that at least 1 consumer is
2312 * satisfied when several consumers are competing for exclusivity over the
2313 * same clock provider.
2314 *
2315 * The exclusivity is not applied if setting the rate failed.
2316 *
2317 * Calls to clk_rate_exclusive_get() should be balanced with calls to
2318 * clk_rate_exclusive_put().
2319 *
2320 * Returns 0 on success, a negative error code otherwise.
2321 */
2322 int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
2323 {
2324 int ret;
2325
2326 if (!clk)
2327 return 0;
2328
2329 /* prevent racing with updates to the clock topology */
2330 clk_prepare_lock();
2331
2332 /*
2333 * The temporary protection removal is deliberately not done here.
2334 * This function is meant to be used instead of clk_rate_protect(),
2335 * i.e. before the consumer code path has protected the clock provider.
2336 */
2337
2338 ret = clk_core_set_rate_nolock(clk->core, rate);
2339 if (!ret) {
2340 clk_core_rate_protect(clk->core);
2341 clk->exclusive_count++;
2342 }
2343
2344 clk_prepare_unlock();
2345
2346 return ret;
2347 }
2348 EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
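
/*
 * Illustrative pairing with clk_rate_exclusive_put() (a sketch; "my_clk"
 * is a hypothetical, already-acquired consumer handle):
 *
 *	err = clk_set_rate_exclusive(my_clk, 19200000);
 *	if (err)
 *		return err;
 *	// ... rate is now protected against other consumers ...
 *	clk_rate_exclusive_put(my_clk);
 */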
2349
2350 /**
2351 * clk_set_rate_range - set a rate range for a clock source
2352 * @clk: clock source
2353 * @min: desired minimum clock rate in Hz, inclusive
2354 * @max: desired maximum clock rate in Hz, inclusive
2355 *
2356 * Returns success (0) or negative errno.
2357 */
2358 int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
2359 {
2360 int ret = 0;
2361 unsigned long old_min, old_max, rate;
2362
2363 if (!clk)
2364 return 0;
2365
2366 if (min > max) {
2367 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
2368 __func__, clk->core->name, clk->dev_id, clk->con_id,
2369 min, max);
2370 return -EINVAL;
2371 }
2372
2373 clk_prepare_lock();
2374
2375 if (clk->exclusive_count)
2376 clk_core_rate_unprotect(clk->core);
2377
2378 /* Save the current values in case we need to rollback the change */
2379 old_min = clk->min_rate;
2380 old_max = clk->max_rate;
2381 clk->min_rate = min;
2382 clk->max_rate = max;
2383
2384 rate = clk_core_get_rate_nolock(clk->core);
2385 if (rate < min || rate > max) {
2386 /*
2387 * FIXME:
2388 * We are in a bit of trouble here: the current rate is outside
2389 * the requested range. We are going to try to request an appropriate
2390 * range boundary but there is a catch. It may fail for the
2391 * usual reason (clock broken, clock protected, etc) but also
2392 * because:
2393 * - round_rate() was not favorable and fell on the wrong
2394 * side of the boundary
2395 * - the determine_rate() callback does not really check for
2396 * this corner case when determining the rate
2397 */
2398
2399 if (rate < min)
2400 rate = min;
2401 else
2402 rate = max;
2403
2404 ret = clk_core_set_rate_nolock(clk->core, rate);
2405 if (ret) {
2406 /* rollback the changes */
2407 clk->min_rate = old_min;
2408 clk->max_rate = old_max;
2409 }
2410 }
2411
2412 if (clk->exclusive_count)
2413 clk_core_rate_protect(clk->core);
2414
2415 clk_prepare_unlock();
2416
2417 return ret;
2418 }
2419 EXPORT_SYMBOL_GPL(clk_set_rate_range);
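
/*
 * Illustrative use (a sketch; the consumer handle and the 50-200 MHz
 * bounds are hypothetical): constrain a clock to a window and let the
 * framework keep its rate inside that window.
 *
 *	err = clk_set_rate_range(my_clk, 50000000, 200000000);
 *	if (err)
 *		dev_warn(dev, "could not apply rate range: %d\n", err);
 */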
2420
2421 /**
2422 * clk_set_min_rate - set a minimum clock rate for a clock source
2423 * @clk: clock source
2424 * @rate: desired minimum clock rate in Hz, inclusive
2425 *
2426 * Returns success (0) or negative errno.
2427 */
2428 int clk_set_min_rate(struct clk *clk, unsigned long rate)
2429 {
2430 if (!clk)
2431 return 0;
2432
2433 return clk_set_rate_range(clk, rate, clk->max_rate);
2434 }
2435 EXPORT_SYMBOL_GPL(clk_set_min_rate);
2436
2437 /**
2438 * clk_set_max_rate - set a maximum clock rate for a clock source
2439 * @clk: clock source
2440 * @rate: desired maximum clock rate in Hz, inclusive
2441 *
2442 * Returns success (0) or negative errno.
2443 */
2444 int clk_set_max_rate(struct clk *clk, unsigned long rate)
2445 {
2446 if (!clk)
2447 return 0;
2448
2449 return clk_set_rate_range(clk, clk->min_rate, rate);
2450 }
2451 EXPORT_SYMBOL_GPL(clk_set_max_rate);
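
/*
 * clk_set_min_rate() and clk_set_max_rate() are thin wrappers around
 * clk_set_rate_range(); e.g. (hypothetical handle and values):
 *
 *	clk_set_min_rate(my_clk, 24000000);	// keep the rate >= 24 MHz
 *	clk_set_max_rate(my_clk, 96000000);	// keep the rate <= 96 MHz
 */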
2452
2453 /**
2454 * clk_get_parent - return the parent of a clk
2455 * @clk: the clk whose parent gets returned
2456 *
2457 * Simply returns clk->parent. Returns NULL if clk is NULL.
2458 */
2459 struct clk *clk_get_parent(struct clk *clk)
2460 {
2461 struct clk *parent;
2462
2463 if (!clk)
2464 return NULL;
2465
2466 clk_prepare_lock();
2467 /* TODO: Create a per-user clk and change callers to call clk_put */
2468 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
2469 clk_prepare_unlock();
2470
2471 return parent;
2472 }
2473 EXPORT_SYMBOL_GPL(clk_get_parent);
2474
2475 static struct clk_core *__clk_init_parent(struct clk_core *core)
2476 {
2477 u8 index = 0;
2478
2479 if (core->num_parents > 1 && core->ops->get_parent)
2480 index = core->ops->get_parent(core->hw);
2481
2482 return clk_core_get_parent_by_index(core, index);
2483 }
2484
2485 static void clk_core_reparent(struct clk_core *core,
2486 struct clk_core *new_parent)
2487 {
2488 clk_reparent(core, new_parent);
2489 __clk_recalc_accuracies(core);
2490 __clk_recalc_rates(core, POST_RATE_CHANGE);
2491 }
2492
2493 void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
2494 {
2495 if (!hw)
2496 return;
2497
2498 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
2499 }
2500
2501 /**
2502 * clk_has_parent - check if a clock is a possible parent for another
2503 * @clk: clock source
2504 * @parent: parent clock source
2505 *
2506 * This function can be used in drivers that need to check that a clock can be
2507 * the parent of another without actually changing the parent.
2508 *
2509 * Returns true if @parent is a possible parent for @clk, false otherwise.
2510 */
2511 bool clk_has_parent(struct clk *clk, struct clk *parent)
2512 {
2513 struct clk_core *core, *parent_core;
2514 int i;
2515
2516 /* NULL clocks should be nops, so return success if either is NULL. */
2517 if (!clk || !parent)
2518 return true;
2519
2520 core = clk->core;
2521 parent_core = parent->core;
2522
2523 /* Optimize for the case where the parent is already the parent. */
2524 if (core->parent == parent_core)
2525 return true;
2526
2527 for (i = 0; i < core->num_parents; i++)
2528 if (!strcmp(core->parents[i].name, parent_core->name))
2529 return true;
2530
2531 return false;
2532 }
2533 EXPORT_SYMBOL_GPL(clk_has_parent);
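
/*
 * Illustrative check before reparenting (a sketch; "mux_clk" and
 * "pll_clk" are hypothetical consumer handles):
 *
 *	if (clk_has_parent(mux_clk, pll_clk))
 *		err = clk_set_parent(mux_clk, pll_clk);
 *	else
 *		err = -EINVAL;
 */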
2534
2535 static int clk_core_set_parent_nolock(struct clk_core *core,
2536 struct clk_core *parent)
2537 {
2538 int ret = 0;
2539 int p_index = 0;
2540 unsigned long p_rate = 0;
2541
2542 lockdep_assert_held(&prepare_lock);
2543
2544 if (!core)
2545 return 0;
2546
2547 if (core->parent == parent)
2548 return 0;
2549
2550 /* verify ops for multi-parent clks */
2551 if (core->num_parents > 1 && !core->ops->set_parent)
2552 return -EPERM;
2553
2554 /* check that we are allowed to re-parent if the clock is in use */
2555 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
2556 return -EBUSY;
2557
2558 if (clk_core_rate_is_protected(core))
2559 return -EBUSY;
2560
2561 /* try finding the new parent index */
2562 if (parent) {
2563 p_index = clk_fetch_parent_index(core, parent);
2564 if (p_index < 0) {
2565 pr_debug("%s: clk %s can not be parent of clk %s\n",
2566 __func__, parent->name, core->name);
2567 return p_index;
2568 }
2569 p_rate = parent->rate;
2570 }
2571
2572 ret = clk_pm_runtime_get(core);
2573 if (ret)
2574 return ret;
2575
2576 /* propagate PRE_RATE_CHANGE notifications */
2577 ret = __clk_speculate_rates(core, p_rate);
2578
2579 /* abort if a driver objects */
2580 if (ret & NOTIFY_STOP_MASK)
2581 goto runtime_put;
2582
2583 /* do the re-parent */
2584 ret = __clk_set_parent(core, parent, p_index);
2585
2586 	/* propagate rate and accuracy recalculation accordingly */
2587 if (ret) {
2588 __clk_recalc_rates(core, ABORT_RATE_CHANGE);
2589 } else {
2590 __clk_recalc_rates(core, POST_RATE_CHANGE);
2591 __clk_recalc_accuracies(core);
2592 }
2593
2594 runtime_put:
2595 clk_pm_runtime_put(core);
2596
2597 return ret;
2598 }
2599
2600 int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
2601 {
2602 return clk_core_set_parent_nolock(hw->core, parent->core);
2603 }
2604 EXPORT_SYMBOL_GPL(clk_hw_set_parent);
2605
2606 /**
2607 * clk_set_parent - switch the parent of a mux clk
2608 * @clk: the mux clk whose input we are switching
2609 * @parent: the new input to clk
2610 *
2611 * Re-parent clk to use parent as its new input source. If clk is in
2612 * prepared state, the clk will get enabled for the duration of this call. If
2613 * that's not acceptable for a specific clk (e.g. the consumer can't handle
2614 * that, the reparenting is glitchy in hardware, etc), use the
2615 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
2616 *
2617 * After successfully changing clk's parent clk_set_parent will update the
2618 * clk topology, sysfs topology and propagate rate recalculation via
2619 * __clk_recalc_rates.
2620 *
2621 * Returns 0 on success, a negative error code otherwise.
2622 */
2623 int clk_set_parent(struct clk *clk, struct clk *parent)
2624 {
2625 int ret;
2626
2627 if (!clk)
2628 return 0;
2629
2630 clk_prepare_lock();
2631
2632 if (clk->exclusive_count)
2633 clk_core_rate_unprotect(clk->core);
2634
2635 ret = clk_core_set_parent_nolock(clk->core,
2636 parent ? parent->core : NULL);
2637
2638 if (clk->exclusive_count)
2639 clk_core_rate_protect(clk->core);
2640
2641 clk_prepare_unlock();
2642
2643 return ret;
2644 }
2645 EXPORT_SYMBOL_GPL(clk_set_parent);
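
/*
 * Illustrative consumer reparenting (a sketch; "mux_clk" and "osc_clk"
 * are hypothetical handles obtained via clk_get()):
 *
 *	err = clk_set_parent(mux_clk, osc_clk);
 *	if (err)
 *		dev_err(dev, "failed to reparent mux: %d\n", err);
 */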
2646
2647 static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
2648 {
2649 int ret = -EINVAL;
2650
2651 lockdep_assert_held(&prepare_lock);
2652
2653 if (!core)
2654 return 0;
2655
2656 if (clk_core_rate_is_protected(core))
2657 return -EBUSY;
2658
2659 trace_clk_set_phase(core, degrees);
2660
2661 if (core->ops->set_phase) {
2662 ret = core->ops->set_phase(core->hw, degrees);
2663 if (!ret)
2664 core->phase = degrees;
2665 }
2666
2667 trace_clk_set_phase_complete(core, degrees);
2668
2669 return ret;
2670 }
2671
2672 /**
2673 * clk_set_phase - adjust the phase shift of a clock signal
2674 * @clk: clock signal source
2675 * @degrees: number of degrees the signal is shifted
2676 *
2677 * Shifts the phase of a clock signal by the specified
2678 * degrees. Returns 0 on success, a negative error code otherwise.
2679 *
2680 * This function makes no distinction about which input or reference
2681 * signal the clock signal phase is adjusted against. For example, for
2682 * phase-locked-loop clock signal generators the phase may be shifted
2683 * with respect to the feedback clock signal input, while in other
2684 * cases the clock phase may be shifted with respect to some other,
2685 * unspecified signal.
2686 *
2687 * Additionally the concept of phase shift does not propagate through
2688 * the clock tree hierarchy, which sets it apart from clock rates and
2689 * clock accuracy. A parent clock phase attribute does not have an
2690 * impact on the phase attribute of a child clock.
2691 */
2692 int clk_set_phase(struct clk *clk, int degrees)
2693 {
2694 int ret;
2695
2696 if (!clk)
2697 return 0;
2698
2699 /* sanity check degrees */
2700 degrees %= 360;
2701 if (degrees < 0)
2702 degrees += 360;
2703
2704 clk_prepare_lock();
2705
2706 if (clk->exclusive_count)
2707 clk_core_rate_unprotect(clk->core);
2708
2709 ret = clk_core_set_phase_nolock(clk->core, degrees);
2710
2711 if (clk->exclusive_count)
2712 clk_core_rate_protect(clk->core);
2713
2714 clk_prepare_unlock();
2715
2716 return ret;
2717 }
2718 EXPORT_SYMBOL_GPL(clk_set_phase);
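
/*
 * Illustrative phase adjustment (a sketch; a 90 degree shift on a
 * hypothetical sampling clock handle):
 *
 *	err = clk_set_phase(sample_clk, 90);
 *	if (!err)
 *		pr_debug("phase is now %d degrees\n", clk_get_phase(sample_clk));
 */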
2719
2720 static int clk_core_get_phase(struct clk_core *core)
2721 {
2722 int ret;
2723
2724 clk_prepare_lock();
2725 /* Always try to update cached phase if possible */
2726 if (core->ops->get_phase)
2727 core->phase = core->ops->get_phase(core->hw);
2728 ret = core->phase;
2729 clk_prepare_unlock();
2730
2731 return ret;
2732 }
2733
2734 /**
2735 * clk_get_phase - return the phase shift of a clock signal
2736 * @clk: clock signal source
2737 *
2738 * Returns the phase shift of a clock node in degrees, otherwise returns
2739 * a negative error code.
2740 */
2741 int clk_get_phase(struct clk *clk)
2742 {
2743 if (!clk)
2744 return 0;
2745
2746 return clk_core_get_phase(clk->core);
2747 }
2748 EXPORT_SYMBOL_GPL(clk_get_phase);
2749
2750 static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
2751 {
2752 /* Assume a default value of 50% */
2753 core->duty.num = 1;
2754 core->duty.den = 2;
2755 }
2756
2757 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
2758
2759 static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
2760 {
2761 struct clk_duty *duty = &core->duty;
2762 int ret = 0;
2763
2764 if (!core->ops->get_duty_cycle)
2765 return clk_core_update_duty_cycle_parent_nolock(core);
2766
2767 ret = core->ops->get_duty_cycle(core->hw, duty);
2768 if (ret)
2769 goto reset;
2770
2771 /* Don't trust the clock provider too much */
2772 if (duty->den == 0 || duty->num > duty->den) {
2773 ret = -EINVAL;
2774 goto reset;
2775 }
2776
2777 return 0;
2778
2779 reset:
2780 clk_core_reset_duty_cycle_nolock(core);
2781 return ret;
2782 }
2783
2784 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
2785 {
2786 int ret = 0;
2787
2788 if (core->parent &&
2789 core->flags & CLK_DUTY_CYCLE_PARENT) {
2790 ret = clk_core_update_duty_cycle_nolock(core->parent);
2791 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2792 } else {
2793 clk_core_reset_duty_cycle_nolock(core);
2794 }
2795
2796 return ret;
2797 }
2798
2799 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2800 struct clk_duty *duty);
2801
2802 static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
2803 struct clk_duty *duty)
2804 {
2805 int ret;
2806
2807 lockdep_assert_held(&prepare_lock);
2808
2809 if (clk_core_rate_is_protected(core))
2810 return -EBUSY;
2811
2812 trace_clk_set_duty_cycle(core, duty);
2813
2814 if (!core->ops->set_duty_cycle)
2815 return clk_core_set_duty_cycle_parent_nolock(core, duty);
2816
2817 ret = core->ops->set_duty_cycle(core->hw, duty);
2818 if (!ret)
2819 memcpy(&core->duty, duty, sizeof(*duty));
2820
2821 trace_clk_set_duty_cycle_complete(core, duty);
2822
2823 return ret;
2824 }
2825
2826 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2827 struct clk_duty *duty)
2828 {
2829 int ret = 0;
2830
2831 if (core->parent &&
2832 core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
2833 ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
2834 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2835 }
2836
2837 return ret;
2838 }
2839
2840 /**
2841 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
2842 * @clk: clock signal source
2843 * @num: numerator of the duty cycle ratio to be applied
2844 * @den: denominator of the duty cycle ratio to be applied
2845 *
2846 * Apply the duty cycle ratio if the ratio is valid and the clock can
2847 * perform this operation
2848 *
2849 * Returns (0) on success, a negative errno otherwise.
2850 */
2851 int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
2852 {
2853 int ret;
2854 struct clk_duty duty;
2855
2856 if (!clk)
2857 return 0;
2858
2859 /* sanity check the ratio */
2860 if (den == 0 || num > den)
2861 return -EINVAL;
2862
2863 duty.num = num;
2864 duty.den = den;
2865
2866 clk_prepare_lock();
2867
2868 if (clk->exclusive_count)
2869 clk_core_rate_unprotect(clk->core);
2870
2871 ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
2872
2873 if (clk->exclusive_count)
2874 clk_core_rate_protect(clk->core);
2875
2876 clk_prepare_unlock();
2877
2878 return ret;
2879 }
2880 EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
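
/*
 * Illustrative duty cycle request (a sketch; a 1/4 = 25%% ratio on a
 * hypothetical PWM-like clock handle):
 *
 *	err = clk_set_duty_cycle(pwm_clk, 1, 4);
 */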
2881
2882 static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
2883 unsigned int scale)
2884 {
2885 struct clk_duty *duty = &core->duty;
2886 int ret;
2887
2888 clk_prepare_lock();
2889
2890 ret = clk_core_update_duty_cycle_nolock(core);
2891 if (!ret)
2892 ret = mult_frac(scale, duty->num, duty->den);
2893
2894 clk_prepare_unlock();
2895
2896 return ret;
2897 }
2898
2899 /**
2900 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
2901 * @clk: clock signal source
2902 * @scale: scaling factor to be applied to represent the ratio as an integer
2903 *
2904 * Returns the duty cycle ratio of a clock node multiplied by the provided
2905 * scaling factor, or negative errno on error.
2906 */
2907 int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
2908 {
2909 if (!clk)
2910 return 0;
2911
2912 return clk_core_get_scaled_duty_cycle(clk->core, scale);
2913 }
2914 EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
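
/*
 * Illustrative read-back (a sketch): with scale = 100 the return value is
 * a percentage, e.g. a 1/2 ratio reads back as 50. "pwm_clk" is a
 * hypothetical consumer handle.
 *
 *	int pct = clk_get_scaled_duty_cycle(pwm_clk, 100);
 */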
2915
2916 /**
2917 * clk_is_match - check if two clk's point to the same hardware clock
2918 * @p: clk compared against q
2919 * @q: clk compared against p
2920 *
2921 * Returns true if the two struct clk pointers both point to the same hardware
2922 * clock node. Put differently, returns true if struct clk *p and struct clk *q
2923 * share the same struct clk_core object.
2924 *
2925 * Returns false otherwise. Note that two NULL clks are treated as matching.
2926 */
2927 bool clk_is_match(const struct clk *p, const struct clk *q)
2928 {
2929 /* trivial case: identical struct clk's or both NULL */
2930 if (p == q)
2931 return true;
2932
2933 /* true if clk->core pointers match. Avoid dereferencing garbage */
2934 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
2935 if (p->core == q->core)
2936 return true;
2937
2938 return false;
2939 }
2940 EXPORT_SYMBOL_GPL(clk_is_match);
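
/*
 * Illustrative use (a sketch; both handles hypothetical): skip work when
 * two independently acquired clks resolve to the same hardware clock.
 *
 *	if (clk_is_match(uart_clk, bus_clk))
 *		return 0;	// same underlying clock, nothing to do
 */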
2941
2942 /*** debugfs support ***/
2943
2944 #ifdef CONFIG_DEBUG_FS
2945 #include <linux/debugfs.h>
2946
2947 static struct dentry *rootdir;
2948 static int inited = 0;
2949 static DEFINE_MUTEX(clk_debug_lock);
2950 static HLIST_HEAD(clk_debug_list);
2951
2952 static struct hlist_head *orphan_list[] = {
2953 &clk_orphan_list,
2954 NULL,
2955 };
2956
2957 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
2958 int level)
2959 {
2960 seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
2961 level * 3 + 1, "",
2962 30 - level * 3, c->name,
2963 c->enable_count, c->prepare_count, c->protect_count,
2964 clk_core_get_rate(c), clk_core_get_accuracy(c),
2965 clk_core_get_phase(c),
2966 clk_core_get_scaled_duty_cycle(c, 100000));
2967 }
2968
2969 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
2970 int level)
2971 {
2972 struct clk_core *child;
2973
2974 clk_summary_show_one(s, c, level);
2975
2976 hlist_for_each_entry(child, &c->children, child_node)
2977 clk_summary_show_subtree(s, child, level + 1);
2978 }
2979
2980 static int clk_summary_show(struct seq_file *s, void *data)
2981 {
2982 struct clk_core *c;
2983 struct hlist_head **lists = (struct hlist_head **)s->private;
2984
2985 seq_puts(s, " enable prepare protect duty\n");
2986 seq_puts(s, " clock count count count rate accuracy phase cycle\n");
2987 seq_puts(s, "---------------------------------------------------------------------------------------------\n");
2988
2989 clk_prepare_lock();
2990
2991 for (; *lists; lists++)
2992 hlist_for_each_entry(c, *lists, child_node)
2993 clk_summary_show_subtree(s, c, 0);
2994
2995 clk_prepare_unlock();
2996
2997 return 0;
2998 }
2999 DEFINE_SHOW_ATTRIBUTE(clk_summary);
3000
3001 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
3002 {
3003 unsigned long min_rate, max_rate;
3004
3005 clk_core_get_boundaries(c, &min_rate, &max_rate);
3006
3007 /* This should be JSON format, i.e. elements separated with a comma */
3008 seq_printf(s, "\"%s\": { ", c->name);
3009 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
3010 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
3011 seq_printf(s, "\"protect_count\": %d,", c->protect_count);
3012 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
3013 seq_printf(s, "\"min_rate\": %lu,", min_rate);
3014 seq_printf(s, "\"max_rate\": %lu,", max_rate);
3015 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
3016 seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
3017 seq_printf(s, "\"duty_cycle\": %u",
3018 clk_core_get_scaled_duty_cycle(c, 100000));
3019 }
3020
3021 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
3022 {
3023 struct clk_core *child;
3024
3025 clk_dump_one(s, c, level);
3026
3027 hlist_for_each_entry(child, &c->children, child_node) {
3028 seq_putc(s, ',');
3029 clk_dump_subtree(s, child, level + 1);
3030 }
3031
3032 seq_putc(s, '}');
3033 }
3034
3035 static int clk_dump_show(struct seq_file *s, void *data)
3036 {
3037 struct clk_core *c;
3038 bool first_node = true;
3039 struct hlist_head **lists = (struct hlist_head **)s->private;
3040
3041 seq_putc(s, '{');
3042 clk_prepare_lock();
3043
3044 for (; *lists; lists++) {
3045 hlist_for_each_entry(c, *lists, child_node) {
3046 if (!first_node)
3047 seq_putc(s, ',');
3048 first_node = false;
3049 clk_dump_subtree(s, c, 0);
3050 }
3051 }
3052
3053 clk_prepare_unlock();
3054
3055 seq_puts(s, "}\n");
3056 return 0;
3057 }
3058 DEFINE_SHOW_ATTRIBUTE(clk_dump);
3059
3060 static const struct {
3061 unsigned long flag;
3062 const char *name;
3063 } clk_flags[] = {
3064 #define ENTRY(f) { f, #f }
3065 ENTRY(CLK_SET_RATE_GATE),
3066 ENTRY(CLK_SET_PARENT_GATE),
3067 ENTRY(CLK_SET_RATE_PARENT),
3068 ENTRY(CLK_IGNORE_UNUSED),
3069 ENTRY(CLK_GET_RATE_NOCACHE),
3070 ENTRY(CLK_SET_RATE_NO_REPARENT),
3071 ENTRY(CLK_GET_ACCURACY_NOCACHE),
3072 ENTRY(CLK_RECALC_NEW_RATES),
3073 ENTRY(CLK_SET_RATE_UNGATE),
3074 ENTRY(CLK_IS_CRITICAL),
3075 ENTRY(CLK_OPS_PARENT_ENABLE),
3076 ENTRY(CLK_DUTY_CYCLE_PARENT),
3077 #undef ENTRY
3078 };
3079
3080 static int clk_flags_show(struct seq_file *s, void *data)
3081 {
3082 struct clk_core *core = s->private;
3083 unsigned long flags = core->flags;
3084 unsigned int i;
3085
3086 for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
3087 if (flags & clk_flags[i].flag) {
3088 seq_printf(s, "%s\n", clk_flags[i].name);
3089 flags &= ~clk_flags[i].flag;
3090 }
3091 }
3092 if (flags) {
3093 /* Unknown flags */
3094 seq_printf(s, "0x%lx\n", flags);
3095 }
3096
3097 return 0;
3098 }
3099 DEFINE_SHOW_ATTRIBUTE(clk_flags);
3100
3101 static void possible_parent_show(struct seq_file *s, struct clk_core *core,
3102 unsigned int i, char terminator)
3103 {
3104 struct clk_core *parent;
3105
3106 /*
3107 * Go through the following options to fetch a parent's name.
3108 *
3109 * 1. Fetch the registered parent clock and use its name
3110 * 2. Use the global (fallback) name if specified
3111 * 3. Use the local fw_name if provided
3112 * 4. Fetch parent clock's clock-output-name if DT index was set
3113 *
3114 * This may still fail in some cases, such as when the parent is
3115 * specified directly via a struct clk_hw pointer, but it isn't
3116 * registered (yet).
3117 */
3118 parent = clk_core_get_parent_by_index(core, i);
3119 if (parent)
3120 seq_puts(s, parent->name);
3121 else if (core->parents[i].name)
3122 seq_puts(s, core->parents[i].name);
3123 else if (core->parents[i].fw_name)
3124 seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
3125 else if (core->parents[i].index >= 0)
3126 seq_puts(s,
3127 of_clk_get_parent_name(core->of_node,
3128 core->parents[i].index));
3129 else
3130 seq_puts(s, "(missing)");
3131
3132 seq_putc(s, terminator);
3133 }
3134
3135 static int possible_parents_show(struct seq_file *s, void *data)
3136 {
3137 struct clk_core *core = s->private;
3138 int i;
3139
3140 for (i = 0; i < core->num_parents - 1; i++)
3141 possible_parent_show(s, core, i, ' ');
3142
3143 possible_parent_show(s, core, i, '\n');
3144
3145 return 0;
3146 }
3147 DEFINE_SHOW_ATTRIBUTE(possible_parents);
3148
3149 static int current_parent_show(struct seq_file *s, void *data)
3150 {
3151 struct clk_core *core = s->private;
3152
3153 if (core->parent)
3154 seq_printf(s, "%s\n", core->parent->name);
3155
3156 return 0;
3157 }
3158 DEFINE_SHOW_ATTRIBUTE(current_parent);
3159
3160 static int clk_duty_cycle_show(struct seq_file *s, void *data)
3161 {
3162 struct clk_core *core = s->private;
3163 struct clk_duty *duty = &core->duty;
3164
3165 seq_printf(s, "%u/%u\n", duty->num, duty->den);
3166
3167 return 0;
3168 }
3169 DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
3170
3171 static int clk_min_rate_show(struct seq_file *s, void *data)
3172 {
3173 struct clk_core *core = s->private;
3174 unsigned long min_rate, max_rate;
3175
3176 clk_prepare_lock();
3177 clk_core_get_boundaries(core, &min_rate, &max_rate);
3178 clk_prepare_unlock();
3179 seq_printf(s, "%lu\n", min_rate);
3180
3181 return 0;
3182 }
3183 DEFINE_SHOW_ATTRIBUTE(clk_min_rate);
3184
3185 static int clk_max_rate_show(struct seq_file *s, void *data)
3186 {
3187 struct clk_core *core = s->private;
3188 unsigned long min_rate, max_rate;
3189
3190 clk_prepare_lock();
3191 clk_core_get_boundaries(core, &min_rate, &max_rate);
3192 clk_prepare_unlock();
3193 seq_printf(s, "%lu\n", max_rate);
3194
3195 return 0;
3196 }
3197 DEFINE_SHOW_ATTRIBUTE(clk_max_rate);
3198
3199 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
3200 {
3201 struct dentry *root;
3202
3203 if (!core || !pdentry)
3204 return;
3205
3206 root = debugfs_create_dir(core->name, pdentry);
3207 core->dentry = root;
3208
3209 debugfs_create_ulong("clk_rate", 0444, root, &core->rate);
3210 debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
3211 debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
3212 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
3213 debugfs_create_u32("clk_phase", 0444, root, &core->phase);
3214 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
3215 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
3216 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
3217 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
3218 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
3219 debugfs_create_file("clk_duty_cycle", 0444, root, core,
3220 &clk_duty_cycle_fops);
3221
3222 if (core->num_parents > 0)
3223 debugfs_create_file("clk_parent", 0444, root, core,
3224 				    &current_parent_fops);
3225
3226 if (core->num_parents > 1)
3227 debugfs_create_file("clk_possible_parents", 0444, root, core,
3228 &possible_parents_fops);
3229
3230 if (core->ops->debug_init)
3231 core->ops->debug_init(core->hw, core->dentry);
3232 }
3233
3234 /**
3235 * clk_debug_register - add a clk node to the debugfs clk directory
3236 * @core: the clk being added to the debugfs clk directory
3237 *
3238 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
3239 * initialized. Otherwise it bails out early since the debugfs clk directory
3240 * will be created lazily by clk_debug_init as part of a late_initcall.
3241 */
3242 static void clk_debug_register(struct clk_core *core)
3243 {
3244 mutex_lock(&clk_debug_lock);
3245 hlist_add_head(&core->debug_node, &clk_debug_list);
3246 if (inited)
3247 clk_debug_create_one(core, rootdir);
3248 mutex_unlock(&clk_debug_lock);
3249 }
3250
3251 /**
3252 * clk_debug_unregister - remove a clk node from the debugfs clk directory
3253 * @core: the clk being removed from the debugfs clk directory
3254 *
3255 * Dynamically removes a clk and all its child nodes from the
3256 * debugfs clk directory if clk->dentry points to debugfs created by
3257 * clk_debug_register in __clk_core_init.
3258 */
3259 static void clk_debug_unregister(struct clk_core *core)
3260 {
3261 mutex_lock(&clk_debug_lock);
3262 hlist_del_init(&core->debug_node);
3263 debugfs_remove_recursive(core->dentry);
3264 core->dentry = NULL;
3265 mutex_unlock(&clk_debug_lock);
3266 }
3267
3268 /**
3269 * clk_debug_init - lazily populate the debugfs clk directory
3270 *
3271 * clks are often initialized very early during boot before memory can be
3272 * dynamically allocated and well before debugfs is setup. This function
3273 * populates the debugfs clk directory once at boot-time when we know that
3274 * debugfs is setup. It should only be called once at boot-time, all other clks
3275 * added dynamically will be done so with clk_debug_register.
3276 */
3277 static int __init clk_debug_init(void)
3278 {
3279 struct clk_core *core;
3280
3281 rootdir = debugfs_create_dir("clk", NULL);
3282
3283 debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
3284 &clk_summary_fops);
3285 debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
3286 &clk_dump_fops);
3287 debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
3288 &clk_summary_fops);
3289 debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
3290 &clk_dump_fops);
3291
3292 mutex_lock(&clk_debug_lock);
3293 hlist_for_each_entry(core, &clk_debug_list, debug_node)
3294 clk_debug_create_one(core, rootdir);
3295
3296 inited = 1;
3297 mutex_unlock(&clk_debug_lock);
3298
3299 return 0;
3300 }
3301 late_initcall(clk_debug_init);
3302 #else
3303 static inline void clk_debug_register(struct clk_core *core) { }
3304 static inline void clk_debug_reparent(struct clk_core *core,
3305 struct clk_core *new_parent)
3306 {
3307 }
3308 static inline void clk_debug_unregister(struct clk_core *core)
3309 {
3310 }
3311 #endif
3312
3313 static void clk_core_reparent_orphans_nolock(void)
3314 {
3315 struct clk_core *orphan;
3316 struct hlist_node *tmp2;
3317
3318 /*
3319 	 * walk the list of orphan clocks and reparent any that have newly found a
3320 * parent.
3321 */
3322 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
3323 struct clk_core *parent = __clk_init_parent(orphan);
3324
3325 /*
3326 * We need to use __clk_set_parent_before() and _after() to
3327 		 * properly migrate any prepare/enable count of the orphan
3328 * clock. This is important for CLK_IS_CRITICAL clocks, which
3329 * are enabled during init but might not have a parent yet.
3330 */
3331 if (parent) {
3332 /* update the clk tree topology */
3333 __clk_set_parent_before(orphan, parent);
3334 __clk_set_parent_after(orphan, parent, NULL);
3335 __clk_recalc_accuracies(orphan);
3336 __clk_recalc_rates(orphan, 0);
3337 __clk_core_update_orphan_hold_state(orphan);
3338 }
3339 }
3340 }
3341
3342 /**
3343 * __clk_core_init - initialize the data structures in a struct clk_core
3344 * @core: clk_core being initialized
3345 *
3346 * Initializes the lists in struct clk_core, queries the hardware for the
3347 * parent and rate and sets them both.
3348 */
3349 static int __clk_core_init(struct clk_core *core)
3350 {
3351 int ret;
3352 unsigned long rate;
3353
3354 if (!core)
3355 return -EINVAL;
3356
3357 clk_prepare_lock();
3358
3359 ret = clk_pm_runtime_get(core);
3360 if (ret)
3361 goto unlock;
3362
3363 /* check to see if a clock with this name is already registered */
3364 if (clk_core_lookup(core->name)) {
3365 pr_debug("%s: clk %s already initialized\n",
3366 __func__, core->name);
3367 ret = -EEXIST;
3368 goto out;
3369 }
3370
3371 /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */
3372 if (core->ops->set_rate &&
3373 !((core->ops->round_rate || core->ops->determine_rate) &&
3374 core->ops->recalc_rate)) {
3375 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
3376 __func__, core->name);
3377 ret = -EINVAL;
3378 goto out;
3379 }
3380
3381 if (core->ops->set_parent && !core->ops->get_parent) {
3382 pr_err("%s: %s must implement .get_parent & .set_parent\n",
3383 __func__, core->name);
3384 ret = -EINVAL;
3385 goto out;
3386 }
3387
3388 if (core->num_parents > 1 && !core->ops->get_parent) {
3389 		pr_err("%s: %s must implement .get_parent as it has multiple parents\n",
3390 __func__, core->name);
3391 ret = -EINVAL;
3392 goto out;
3393 }
3394
3395 if (core->ops->set_rate_and_parent &&
3396 !(core->ops->set_parent && core->ops->set_rate)) {
3397 pr_err("%s: %s must implement .set_parent & .set_rate\n",
3398 __func__, core->name);
3399 ret = -EINVAL;
3400 goto out;
3401 }
3402
3403 core->parent = __clk_init_parent(core);
3404
3405 /*
3406 * Populate core->parent if parent has already been clk_core_init'd. If
3407 * parent has not yet been clk_core_init'd then place clk in the orphan
3408 * list. If clk doesn't have any parents then place it in the root
3409 * clk list.
3410 *
3411 * Every time a new clk is clk_init'd then we walk the list of orphan
3412 * clocks and re-parent any that are children of the clock currently
3413 * being clk_init'd.
3414 */
3415 if (core->parent) {
3416 hlist_add_head(&core->child_node,
3417 &core->parent->children);
3418 core->orphan = core->parent->orphan;
3419 } else if (!core->num_parents) {
3420 hlist_add_head(&core->child_node, &clk_root_list);
3421 core->orphan = false;
3422 } else {
3423 hlist_add_head(&core->child_node, &clk_orphan_list);
3424 core->orphan = true;
3425 }
3426
3427 /*
3428 * optional platform-specific magic
3429 *
3430 * The .init callback is not used by any of the basic clock types, but
3431 * exists for weird hardware that must perform initialization magic.
3432 * Please consider other ways of solving initialization problems before
3433 * using this callback, as its use is discouraged.
3434 */
3435 if (core->ops->init)
3436 core->ops->init(core->hw);
3437
3438 /*
3439 * Set clk's accuracy. The preferred method is to use
3440 * .recalc_accuracy. For simple clocks and lazy developers the default
3441 * fallback is to use the parent's accuracy. If a clock doesn't have a
3442 * parent (or is orphaned) then accuracy is set to zero (perfect
3443 * clock).
3444 */
3445 if (core->ops->recalc_accuracy)
3446 core->accuracy = core->ops->recalc_accuracy(core->hw,
3447 __clk_get_accuracy(core->parent));
3448 else if (core->parent)
3449 core->accuracy = core->parent->accuracy;
3450 else
3451 core->accuracy = 0;
3452
3453 /*
3454 * Set clk's phase.
3455 * Since a phase is by definition relative to its parent, just
3456 * query the current clock phase, or just assume it's in phase.
3457 */
3458 if (core->ops->get_phase)
3459 core->phase = core->ops->get_phase(core->hw);
3460 else
3461 core->phase = 0;
3462
3463 /*
3464 * Set clk's duty cycle.
3465 */
3466 clk_core_update_duty_cycle_nolock(core);
3467
3468 /*
3469 * Set clk's rate. The preferred method is to use .recalc_rate. For
3470 * simple clocks and lazy developers the default fallback is to use the
3471 * parent's rate. If a clock doesn't have a parent (or is orphaned)
3472 * then rate is set to zero.
3473 */
3474 if (core->ops->recalc_rate)
3475 rate = core->ops->recalc_rate(core->hw,
3476 clk_core_get_rate_nolock(core->parent));
3477 else if (core->parent)
3478 rate = core->parent->rate;
3479 else
3480 rate = 0;
3481 core->rate = core->req_rate = rate;
3482
3483 core->boot_enabled = clk_core_is_enabled(core);
3484
3485 /*
3486 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
3487 * don't get accidentally disabled when walking the orphan tree and
3488 * reparenting clocks
3489 */
3490 if (core->flags & CLK_IS_CRITICAL) {
3491 unsigned long flags;
3492
3493 ret = clk_core_prepare(core);
3494 if (ret)
3495 goto out;
3496
3497 flags = clk_enable_lock();
3498 ret = clk_core_enable(core);
3499 clk_enable_unlock(flags);
3500 if (ret) {
3501 clk_core_unprepare(core);
3502 goto out;
3503 }
3504 }
3505
3506 clk_core_hold_state(core);
3507 clk_core_reparent_orphans_nolock();
3508
3509 kref_init(&core->ref);
3510 out:
3511 clk_pm_runtime_put(core);
3512 unlock:
3513 clk_prepare_unlock();
3514
3515 if (!ret)
3516 clk_debug_register(core);
3517
3518 return ret;
3519 }
3520
3521 /**
3522 * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
3523 * @core: clk to add consumer to
3524 * @clk: consumer to link to a clk
3525 */
3526 static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
3527 {
3528 clk_prepare_lock();
3529 hlist_add_head(&clk->clks_node, &core->clks);
3530 clk_prepare_unlock();
3531 }
3532
3533 /**
3534 * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
3535 * @clk: consumer to unlink
3536 */
3537 static void clk_core_unlink_consumer(struct clk *clk)
3538 {
3539 lockdep_assert_held(&prepare_lock);
3540 hlist_del(&clk->clks_node);
3541 }
3542
3543 /**
3544 * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core
3545 * @core: clk to allocate a consumer for
3546 * @dev_id: string describing device name
3547 * @con_id: connection ID string on device
3548 *
3549 * Returns: clk consumer left unlinked from the consumer list
3550 */
3551 static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
3552 const char *con_id)
3553 {
3554 struct clk *clk;
3555
3556 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
3557 if (!clk)
3558 return ERR_PTR(-ENOMEM);
3559
3560 clk->core = core;
3561 clk->dev_id = dev_id;
3562 clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
3563 clk->max_rate = ULONG_MAX;
3564
3565 return clk;
3566 }
3567
3568 /**
3569 * free_clk - Free a clk consumer
3570 * @clk: clk consumer to free
3571 *
3572 * Note, this assumes the clk has been unlinked from the clk_core consumer
3573 * list.
3574 */
3575 static void free_clk(struct clk *clk)
3576 {
3577 kfree_const(clk->con_id);
3578 kfree(clk);
3579 }
3580
3581 /**
3582 * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given
3583 * a clk_hw
3584 * @dev: clk consumer device
3585 * @hw: clk_hw associated with the clk being consumed
3586 * @dev_id: string describing device name
3587 * @con_id: connection ID string on device
3588 *
3589 * This is the main function used to create a clk pointer for use by clk
3590 * consumers. It connects a consumer to the clk_core and clk_hw structures
3591 * used by the framework and clk provider respectively.
3592 */
3593 struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
3594 const char *dev_id, const char *con_id)
3595 {
3596 struct clk *clk;
3597 struct clk_core *core;
3598
3599 /* This is to allow this function to be chained to others */
3600 if (IS_ERR_OR_NULL(hw))
3601 return ERR_CAST(hw);
3602
3603 core = hw->core;
3604 clk = alloc_clk(core, dev_id, con_id);
3605 if (IS_ERR(clk))
3606 return clk;
3607 clk->dev = dev;
3608
3609 if (!try_module_get(core->owner)) {
3610 free_clk(clk);
3611 return ERR_PTR(-ENOENT);
3612 }
3613
3614 kref_get(&core->ref);
3615 clk_core_link_consumer(core, clk);
3616
3617 return clk;
3618 }
3619
3620 static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
3621 {
3622 const char *dst;
3623
3624 if (!src) {
3625 if (must_exist)
3626 return -EINVAL;
3627 return 0;
3628 }
3629
3630 *dst_p = dst = kstrdup_const(src, GFP_KERNEL);
3631 if (!dst)
3632 return -ENOMEM;
3633
3634 return 0;
3635 }
3636
3637 static int clk_core_populate_parent_map(struct clk_core *core,
3638 const struct clk_init_data *init)
3639 {
3640 u8 num_parents = init->num_parents;
3641 const char * const *parent_names = init->parent_names;
3642 const struct clk_hw **parent_hws = init->parent_hws;
3643 const struct clk_parent_data *parent_data = init->parent_data;
3644 int i, ret = 0;
3645 struct clk_parent_map *parents, *parent;
3646
3647 if (!num_parents)
3648 return 0;
3649
3650 /*
3651 * Avoid unnecessary string look-ups of clk_core's possible parents by
3652 * having a cache of names/clk_hw pointers to clk_core pointers.
3653 */
3654 parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
3655 core->parents = parents;
3656 if (!parents)
3657 return -ENOMEM;
3658
3659 /* Copy everything over because it might be __initdata */
3660 for (i = 0, parent = parents; i < num_parents; i++, parent++) {
3661 parent->index = -1;
3662 if (parent_names) {
3663 /* throw a WARN if any entries are NULL */
3664 WARN(!parent_names[i],
3665 "%s: invalid NULL in %s's .parent_names\n",
3666 __func__, core->name);
3667 ret = clk_cpy_name(&parent->name, parent_names[i],
3668 true);
3669 } else if (parent_data) {
3670 parent->hw = parent_data[i].hw;
3671 parent->index = parent_data[i].index;
3672 ret = clk_cpy_name(&parent->fw_name,
3673 parent_data[i].fw_name, false);
3674 if (!ret)
3675 ret = clk_cpy_name(&parent->name,
3676 parent_data[i].name,
3677 false);
3678 } else if (parent_hws) {
3679 parent->hw = parent_hws[i];
3680 } else {
3681 ret = -EINVAL;
3682 WARN(1, "Must specify parents if num_parents > 0\n");
3683 }
3684
3685 if (ret) {
3686 do {
3687 kfree_const(parents[i].name);
3688 kfree_const(parents[i].fw_name);
3689 } while (--i >= 0);
3690 kfree(parents);
3691
3692 return ret;
3693 }
3694 }
3695
3696 return 0;
3697 }
3698
3699 static void clk_core_free_parent_map(struct clk_core *core)
3700 {
3701 int i = core->num_parents;
3702
3703 if (!core->num_parents)
3704 return;
3705
3706 while (--i >= 0) {
3707 kfree_const(core->parents[i].name);
3708 kfree_const(core->parents[i].fw_name);
3709 }
3710
3711 kfree(core->parents);
3712 }
3713
3714 static struct clk *
3715 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
3716 {
3717 int ret;
3718 struct clk_core *core;
3719 const struct clk_init_data *init = hw->init;
3720
3721 /*
3722 * The init data is not supposed to be used outside of the registration path.
3723 * Set it to NULL so that provider drivers can't use it either and so that
3724 * we catch use of hw->init early on in the core.
3725 */
3726 hw->init = NULL;
3727
3728 core = kzalloc(sizeof(*core), GFP_KERNEL);
3729 if (!core) {
3730 ret = -ENOMEM;
3731 goto fail_out;
3732 }
3733
3734 core->name = kstrdup_const(init->name, GFP_KERNEL);
3735 if (!core->name) {
3736 ret = -ENOMEM;
3737 goto fail_name;
3738 }
3739
3740 if (WARN_ON(!init->ops)) {
3741 ret = -EINVAL;
3742 goto fail_ops;
3743 }
3744 core->ops = init->ops;
3745
3746 if (dev && pm_runtime_enabled(dev))
3747 core->rpm_enabled = true;
3748 core->dev = dev;
3749 core->of_node = np;
3750 if (dev && dev->driver)
3751 core->owner = dev->driver->owner;
3752 core->hw = hw;
3753 core->flags = init->flags;
3754 core->num_parents = init->num_parents;
3755 core->min_rate = 0;
3756 core->max_rate = ULONG_MAX;
3757 hw->core = core;
3758
3759 ret = clk_core_populate_parent_map(core, init);
3760 if (ret)
3761 goto fail_parents;
3762
3763 INIT_HLIST_HEAD(&core->clks);
3764
3765 /*
3766 * Don't call clk_hw_create_clk() here because that would pin the
3767 * provider module to itself and prevent it from ever being removed.
3768 */
3769 hw->clk = alloc_clk(core, NULL, NULL);
3770 if (IS_ERR(hw->clk)) {
3771 ret = PTR_ERR(hw->clk);
3772 goto fail_create_clk;
3773 }
3774
3775 clk_core_link_consumer(hw->core, hw->clk);
3776
3777 ret = __clk_core_init(core);
3778 if (!ret)
3779 return hw->clk;
3780
3781 clk_prepare_lock();
3782 clk_core_unlink_consumer(hw->clk);
3783 clk_prepare_unlock();
3784
3785 free_clk(hw->clk);
3786 hw->clk = NULL;
3787
3788 fail_create_clk:
3789 clk_core_free_parent_map(core);
3790 fail_parents:
3791 fail_ops:
3792 kfree_const(core->name);
3793 fail_name:
3794 kfree(core);
3795 fail_out:
3796 return ERR_PTR(ret);
3797 }
3798
3799 /**
3800 * clk_register - allocate a new clock, register it and return an opaque cookie
3801 * @dev: device that is registering this clock
3802 * @hw: link to hardware-specific clock data
3803 *
3804 * clk_register is the *deprecated* interface for populating the clock tree with
3805 * new clock nodes. Use clk_hw_register() instead.
3806 *
3807 * Returns: a pointer to the newly allocated struct clk which
3808 * cannot be dereferenced by driver code but may be used in conjunction with the
3809 * rest of the clock API. In the event of an error clk_register() will return an
3810 * error pointer; drivers must test for errors with IS_ERR() after calling clk_register().
3811 */
3812 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
3813 {
3814 return __clk_register(dev, dev_of_node(dev), hw);
3815 }
3816 EXPORT_SYMBOL_GPL(clk_register);
3817
3818 /**
3819 * clk_hw_register - register a clk_hw and return an error code
3820 * @dev: device that is registering this clock
3821 * @hw: link to hardware-specific clock data
3822 *
3823 * clk_hw_register is the primary interface for populating the clock tree with
3824 * new clock nodes. It returns an integer equal to zero indicating success or
3825 * less than zero indicating failure. Drivers must test for an error code after
3826 * calling clk_hw_register().
3827 */
3828 int clk_hw_register(struct device *dev, struct clk_hw *hw)
3829 {
3830 return PTR_ERR_OR_ZERO(__clk_register(dev, dev_of_node(dev), hw));
3831 }
3832 EXPORT_SYMBOL_GPL(clk_hw_register);
3833
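/*
 * A minimal usage sketch, assuming a hypothetical "foo" provider (the ops
 * structure and names below are placeholders): the init data only has to
 * live for the duration of the call, since __clk_register() copies what it
 * needs and clears hw->init.
 *
 *	static struct clk_hw foo_hw;
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct clk_init_data init = {
 *			.name = "foo_osc",
 *			.ops = &foo_clk_ops,
 *		};
 *
 *		foo_hw.init = &init;
 *		return clk_hw_register(&pdev->dev, &foo_hw);
 *	}
 */
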
3834 /**
3835 * of_clk_hw_register - register a clk_hw and return an error code
3836 * @node: device_node of device that is registering this clock
3837 * @hw: link to hardware-specific clock data
3838 *
3839 * of_clk_hw_register() is the primary interface for populating the clock tree
3840 * with new clock nodes when a struct device is not available, but a struct
3841 * device_node is. It returns an integer equal to zero indicating success or
3842 * less than zero indicating failure. Drivers must test for an error code after
3843 * calling of_clk_hw_register().
3844 */
3845 int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
3846 {
3847 return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
3848 }
3849 EXPORT_SYMBOL_GPL(of_clk_hw_register);
3850
3851 /* Free memory allocated for a clock. */
3852 static void __clk_release(struct kref *ref)
3853 {
3854 struct clk_core *core = container_of(ref, struct clk_core, ref);
3855
3856 lockdep_assert_held(&prepare_lock);
3857
3858 clk_core_free_parent_map(core);
3859 kfree_const(core->name);
3860 kfree(core);
3861 }
3862
3863 /*
3864 * Empty clk_ops for unregistered clocks. These are used temporarily
3865 * after clk_unregister() has been called on a clock and until the last clock
3866 * consumer calls clk_put() and the struct clk object is freed.
3867 */
3868 static int clk_nodrv_prepare_enable(struct clk_hw *hw)
3869 {
3870 return -ENXIO;
3871 }
3872
3873 static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
3874 {
3875 WARN_ON_ONCE(1);
3876 }
3877
3878 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
3879 unsigned long parent_rate)
3880 {
3881 return -ENXIO;
3882 }
3883
3884 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
3885 {
3886 return -ENXIO;
3887 }
3888
3889 static const struct clk_ops clk_nodrv_ops = {
3890 .enable = clk_nodrv_prepare_enable,
3891 .disable = clk_nodrv_disable_unprepare,
3892 .prepare = clk_nodrv_prepare_enable,
3893 .unprepare = clk_nodrv_disable_unprepare,
3894 .set_rate = clk_nodrv_set_rate,
3895 .set_parent = clk_nodrv_set_parent,
3896 };
3897
3898 static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
3899 struct clk_core *target)
3900 {
3901 int i;
3902 struct clk_core *child;
3903
3904 for (i = 0; i < root->num_parents; i++)
3905 if (root->parents[i].core == target)
3906 root->parents[i].core = NULL;
3907
3908 hlist_for_each_entry(child, &root->children, child_node)
3909 clk_core_evict_parent_cache_subtree(child, target);
3910 }
3911
3912 /* Remove this clk from all parent caches */
3913 static void clk_core_evict_parent_cache(struct clk_core *core)
3914 {
3915 struct hlist_head **lists;
3916 struct clk_core *root;
3917
3918 lockdep_assert_held(&prepare_lock);
3919
3920 for (lists = all_lists; *lists; lists++)
3921 hlist_for_each_entry(root, *lists, child_node)
3922 clk_core_evict_parent_cache_subtree(root, core);
3923
3924 }
3925
3926 /**
3927 * clk_unregister - unregister a currently registered clock
3928 * @clk: clock to unregister
3929 */
3930 void clk_unregister(struct clk *clk)
3931 {
3932 unsigned long flags;
3933
3934 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
3935 return;
3936
3937 clk_debug_unregister(clk->core);
3938
3939 clk_prepare_lock();
3940
3941 if (clk->core->ops == &clk_nodrv_ops) {
3942 pr_err("%s: unregistered clock: %s\n", __func__,
3943 clk->core->name);
3944 goto unlock;
3945 }
3946 /*
3947 * Assign empty clock ops for consumers that might still hold
3948 * a reference to this clock.
3949 */
3950 flags = clk_enable_lock();
3951 clk->core->ops = &clk_nodrv_ops;
3952 clk_enable_unlock(flags);
3953
3954 if (!hlist_empty(&clk->core->children)) {
3955 struct clk_core *child;
3956 struct hlist_node *t;
3957
3958 /* Reparent all children to the orphan list. */
3959 hlist_for_each_entry_safe(child, t, &clk->core->children,
3960 child_node)
3961 clk_core_set_parent_nolock(child, NULL);
3962 }
3963
3964 clk_core_evict_parent_cache(clk->core);
3965
3966 hlist_del_init(&clk->core->child_node);
3967
3968 if (clk->core->prepare_count)
3969 pr_warn("%s: unregistering prepared clock: %s\n",
3970 __func__, clk->core->name);
3971
3972 if (clk->core->protect_count)
3973 pr_warn("%s: unregistering protected clock: %s\n",
3974 __func__, clk->core->name);
3975
3976 kref_put(&clk->core->ref, __clk_release);
3977 free_clk(clk);
3978 unlock:
3979 clk_prepare_unlock();
3980 }
3981 EXPORT_SYMBOL_GPL(clk_unregister);
3982
3983 /**
3984 * clk_hw_unregister - unregister a currently registered clk_hw
3985 * @hw: hardware-specific clock data to unregister
3986 */
3987 void clk_hw_unregister(struct clk_hw *hw)
3988 {
3989 clk_unregister(hw->clk);
3990 }
3991 EXPORT_SYMBOL_GPL(clk_hw_unregister);
3992
3993 static void devm_clk_release(struct device *dev, void *res)
3994 {
3995 clk_unregister(*(struct clk **)res);
3996 }
3997
3998 static void devm_clk_hw_release(struct device *dev, void *res)
3999 {
4000 clk_hw_unregister(*(struct clk_hw **)res);
4001 }
4002
4003 /**
4004 * devm_clk_register - resource managed clk_register()
4005 * @dev: device that is registering this clock
4006 * @hw: link to hardware-specific clock data
4007 *
4008 * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead.
4009 *
4010 * Clocks returned from this function are automatically clk_unregister()ed on
4011 * driver detach. See clk_register() for more information.
4012 */
4013 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
4014 {
4015 struct clk *clk;
4016 struct clk **clkp;
4017
4018 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
4019 if (!clkp)
4020 return ERR_PTR(-ENOMEM);
4021
4022 clk = clk_register(dev, hw);
4023 if (!IS_ERR(clk)) {
4024 *clkp = clk;
4025 devres_add(dev, clkp);
4026 } else {
4027 devres_free(clkp);
4028 }
4029
4030 return clk;
4031 }
4032 EXPORT_SYMBOL_GPL(devm_clk_register);
4033
4034 /**
4035 * devm_clk_hw_register - resource managed clk_hw_register()
4036 * @dev: device that is registering this clock
4037 * @hw: link to hardware-specific clock data
4038 *
4039 * Managed clk_hw_register(). Clocks registered by this function are
4040 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
4041 * for more information.
4042 */
4043 int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
4044 {
4045 struct clk_hw **hwp;
4046 int ret;
4047
4048 hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
4049 if (!hwp)
4050 return -ENOMEM;
4051
4052 ret = clk_hw_register(dev, hw);
4053 if (!ret) {
4054 *hwp = hw;
4055 devres_add(dev, hwp);
4056 } else {
4057 devres_free(hwp);
4058 }
4059
4060 return ret;
4061 }
4062 EXPORT_SYMBOL_GPL(devm_clk_hw_register);
4063
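/*
 * A sketch of the managed pattern, assuming the same hypothetical "foo"
 * provider as above: no explicit clk_hw_unregister() call is needed, the
 * devres action added here drops the clock when the device unbinds.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		foo_hw.init = &foo_init;
 *		return devm_clk_hw_register(&pdev->dev, &foo_hw);
 *	}
 */
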
4064 static int devm_clk_match(struct device *dev, void *res, void *data)
4065 {
4066 struct clk *c = res;
4067 if (WARN_ON(!c))
4068 return 0;
4069 return c == data;
4070 }
4071
4072 static int devm_clk_hw_match(struct device *dev, void *res, void *data)
4073 {
4074 struct clk_hw *hw = res;
4075
4076 if (WARN_ON(!hw))
4077 return 0;
4078 return hw == data;
4079 }
4080
4081 /**
4082 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that is unregistering the clock
4083 * @clk: clock to unregister
4084 *
4085 * Deallocate a clock allocated with devm_clk_register(). Normally
4086 * this function will not need to be called and the resource management
4087 * code will ensure that the resource is freed.
4088 */
4089 void devm_clk_unregister(struct device *dev, struct clk *clk)
4090 {
4091 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
4092 }
4093 EXPORT_SYMBOL_GPL(devm_clk_unregister);
4094
4095 /**
4096 * devm_clk_hw_unregister - resource managed clk_hw_unregister()
4097 * @dev: device that is unregistering the hardware-specific clock data
4098 * @hw: link to hardware-specific clock data
4099 *
4100 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
4101 * this function will not need to be called and the resource management
4102 * code will ensure that the resource is freed.
4103 */
4104 void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
4105 {
4106 WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
4107 hw));
4108 }
4109 EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
4110
4111 /*
4112 * clkdev helpers
4113 */
4114
4115 void __clk_put(struct clk *clk)
4116 {
4117 struct module *owner;
4118
4119 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4120 return;
4121
4122 clk_prepare_lock();
4123
4124 /*
4125 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
4126 * given user should be balanced with calls to clk_rate_exclusive_put()
4127 * and by that same consumer
4128 */
4129 if (WARN_ON(clk->exclusive_count)) {
4130 /* We voiced our concern, let's sanitize the situation */
4131 clk->core->protect_count -= (clk->exclusive_count - 1);
4132 clk_core_rate_unprotect(clk->core);
4133 clk->exclusive_count = 0;
4134 }
4135
4136 hlist_del(&clk->clks_node);
4137 if (clk->min_rate > clk->core->req_rate ||
4138 clk->max_rate < clk->core->req_rate)
4139 clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
4140
4141 owner = clk->core->owner;
4142 kref_put(&clk->core->ref, __clk_release);
4143
4144 clk_prepare_unlock();
4145
4146 module_put(owner);
4147
4148 free_clk(clk);
4149 }
4150
4151 /*** clk rate change notifiers ***/
4152
4153 /**
4154 * clk_notifier_register - add a clk rate change notifier
4155 * @clk: struct clk * to watch
4156 * @nb: struct notifier_block * with callback info
4157 *
4158 * Request notification when clk's rate changes. This uses an SRCU
4159 * notifier because we want it to block and notifier unregistrations are
4160 * uncommon. The callbacks associated with the notifier must not
4161 * re-enter the clk framework by calling any top-level clk APIs;
4162 * doing so would result in nested locking of the prepare_lock mutex.
4163 *
4164 * In all notification cases (pre, post and abort rate change) the original
4165 * clock rate is passed to the callback via struct clk_notifier_data.old_rate
4166 * and the new frequency is passed via struct clk_notifier_data.new_rate.
4167 *
4168 * clk_notifier_register() must be called from non-atomic context.
4169 * Returns -EINVAL if called with null arguments, -ENOMEM upon
4170 * allocation failure; otherwise, passes along the return value of
4171 * srcu_notifier_chain_register().
4172 */
4173 int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
4174 {
4175 struct clk_notifier *cn;
4176 int ret = -ENOMEM;
4177
4178 if (!clk || !nb)
4179 return -EINVAL;
4180
4181 clk_prepare_lock();
4182
4183 /* search the list of notifiers for this clk */
4184 list_for_each_entry(cn, &clk_notifier_list, node)
4185 if (cn->clk == clk)
4186 break;
4187
4188 /* if clk wasn't in the notifier list, allocate new clk_notifier */
4189 if (cn->clk != clk) {
4190 cn = kzalloc(sizeof(*cn), GFP_KERNEL);
4191 if (!cn)
4192 goto out;
4193
4194 cn->clk = clk;
4195 srcu_init_notifier_head(&cn->notifier_head);
4196
4197 list_add(&cn->node, &clk_notifier_list);
4198 }
4199
4200 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
4201
4202 clk->core->notifier_count++;
4203
4204 out:
4205 clk_prepare_unlock();
4206
4207 return ret;
4208 }
4209 EXPORT_SYMBOL_GPL(clk_notifier_register);
4210
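/*
 * A consumer-side sketch, assuming a hypothetical "foo" driver: the callback
 * runs with the prepare_lock held, so it should only inspect the
 * clk_notifier_data it is handed and must not call back into the clk API.
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == PRE_RATE_CHANGE)
 *			pr_debug("foo: rate %lu -> %lu\n",
 *				 cnd->old_rate, cnd->new_rate);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_clk_notify,
 *	};
 *
 *	ret = clk_notifier_register(clk, &foo_nb);
 */
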
4211 /**
4212 * clk_notifier_unregister - remove a clk rate change notifier
4213 * @clk: struct clk *
4214 * @nb: struct notifier_block * with callback info
4215 *
4216 * Requests no further notification of changes to 'clk' and frees the memory
4217 * allocated in clk_notifier_register().
4218 *
4219 * Returns -EINVAL if called with null arguments; otherwise, passes
4220 * along the return value of srcu_notifier_chain_unregister().
4221 */
4222 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
4223 {
4224 struct clk_notifier *cn = NULL;
4225 int ret = -EINVAL;
4226
4227 if (!clk || !nb)
4228 return -EINVAL;
4229
4230 clk_prepare_lock();
4231
4232 list_for_each_entry(cn, &clk_notifier_list, node)
4233 if (cn->clk == clk)
4234 break;
4235
4236 if (cn->clk == clk) {
4237 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
4238
4239 clk->core->notifier_count--;
4240
4241 /* XXX the notifier code should handle this better */
4242 if (!cn->notifier_head.head) {
4243 srcu_cleanup_notifier_head(&cn->notifier_head);
4244 list_del(&cn->node);
4245 kfree(cn);
4246 }
4247
4248 } else {
4249 ret = -ENOENT;
4250 }
4251
4252 clk_prepare_unlock();
4253
4254 return ret;
4255 }
4256 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
4257
4258 #ifdef CONFIG_OF
4259 static void clk_core_reparent_orphans(void)
4260 {
4261 clk_prepare_lock();
4262 clk_core_reparent_orphans_nolock();
4263 clk_prepare_unlock();
4264 }
4265
4266 /**
4267 * struct of_clk_provider - Clock provider registration structure
4268 * @link: Entry in global list of clock providers
4269 * @node: Pointer to device tree node of clock provider
4270 * @get: Get clock callback. Returns NULL or a struct clk for the
4271 * given clock specifier
4272 * @data: context pointer to be passed into @get callback
4273 */
4274 struct of_clk_provider {
4275 struct list_head link;
4276
4277 struct device_node *node;
4278 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
4279 struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
4280 void *data;
4281 };
4282
4283 extern struct of_device_id __clk_of_table;
4284 static const struct of_device_id __clk_of_table_sentinel
4285 __used __section(__clk_of_table_end);
4286
4287 static LIST_HEAD(of_clk_providers);
4288 static DEFINE_MUTEX(of_clk_mutex);
4289
4290 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
4291 void *data)
4292 {
4293 return data;
4294 }
4295 EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
4296
4297 struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
4298 {
4299 return data;
4300 }
4301 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
4302
4303 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
4304 {
4305 struct clk_onecell_data *clk_data = data;
4306 unsigned int idx = clkspec->args[0];
4307
4308 if (idx >= clk_data->clk_num) {
4309 pr_err("%s: invalid clock index %u\n", __func__, idx);
4310 return ERR_PTR(-EINVAL);
4311 }
4312
4313 return clk_data->clks[idx];
4314 }
4315 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
4316
4317 struct clk_hw *
4318 of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
4319 {
4320 struct clk_hw_onecell_data *hw_data = data;
4321 unsigned int idx = clkspec->args[0];
4322
4323 if (idx >= hw_data->num) {
4324 pr_err("%s: invalid index %u\n", __func__, idx);
4325 return ERR_PTR(-EINVAL);
4326 }
4327
4328 return hw_data->hws[idx];
4329 }
4330 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
4331
4332 /**
4333 * of_clk_add_provider() - Register a clock provider for a node
4334 * @np: Device node pointer associated with clock provider
4335 * @clk_src_get: callback for decoding clock
4336 * @data: context pointer for @clk_src_get callback.
4337 *
4338 * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
4339 */
4340 int of_clk_add_provider(struct device_node *np,
4341 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
4342 void *data),
4343 void *data)
4344 {
4345 struct of_clk_provider *cp;
4346 int ret;
4347
4348 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
4349 if (!cp)
4350 return -ENOMEM;
4351
4352 cp->node = of_node_get(np);
4353 cp->data = data;
4354 cp->get = clk_src_get;
4355
4356 mutex_lock(&of_clk_mutex);
4357 list_add(&cp->link, &of_clk_providers);
4358 mutex_unlock(&of_clk_mutex);
4359 pr_debug("Added clock from %pOF\n", np);
4360
4361 clk_core_reparent_orphans();
4362
4363 ret = of_clk_set_defaults(np, true);
4364 if (ret < 0)
4365 of_clk_del_provider(np);
4366
4367 return ret;
4368 }
4369 EXPORT_SYMBOL_GPL(of_clk_add_provider);
4370
4371 /**
4372 * of_clk_add_hw_provider() - Register a clock provider for a node
4373 * @np: Device node pointer associated with clock provider
4374 * @get: callback for decoding clk_hw
4375 * @data: context pointer for @get callback.
4376 */
4377 int of_clk_add_hw_provider(struct device_node *np,
4378 struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4379 void *data),
4380 void *data)
4381 {
4382 struct of_clk_provider *cp;
4383 int ret;
4384
4385 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
4386 if (!cp)
4387 return -ENOMEM;
4388
4389 cp->node = of_node_get(np);
4390 cp->data = data;
4391 cp->get_hw = get;
4392
4393 mutex_lock(&of_clk_mutex);
4394 list_add(&cp->link, &of_clk_providers);
4395 mutex_unlock(&of_clk_mutex);
4396 pr_debug("Added clk_hw provider from %pOF\n", np);
4397
4398 clk_core_reparent_orphans();
4399
4400 ret = of_clk_set_defaults(np, true);
4401 if (ret < 0)
4402 of_clk_del_provider(np);
4403
4404 return ret;
4405 }
4406 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
4407
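/*
 * A provider-side sketch using the onecell helper above; the sizes, "foo"
 * names and index macros are assumptions for illustration only:
 *
 *	struct clk_hw_onecell_data *data;
 *
 *	data = kzalloc(struct_size(data, hws, 2), GFP_KERNEL);
 *	data->num = 2;
 *	data->hws[FOO_PLL] = &foo_pll_hw;
 *	data->hws[FOO_DIV] = &foo_div_hw;
 *
 *	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, data);
 *
 * Consumers then reference the clocks as <&foo_cc FOO_PLL> etc. in DT, and
 * of_clk_hw_onecell_get() indexes into data->hws with the single cell.
 */
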
4408 static void devm_of_clk_release_provider(struct device *dev, void *res)
4409 {
4410 of_clk_del_provider(*(struct device_node **)res);
4411 }
4412
4413 /*
4414 * We allow a child device to use its parent device as the clock provider node
4415 * for cases like MFD sub-devices where the child device driver wants to use
4416 * devm_*() APIs but not list the device in DT as a sub-node.
4417 */
4418 static struct device_node *get_clk_provider_node(struct device *dev)
4419 {
4420 struct device_node *np, *parent_np;
4421
4422 np = dev->of_node;
4423 parent_np = dev->parent ? dev->parent->of_node : NULL;
4424
4425 if (!of_find_property(np, "#clock-cells", NULL))
4426 if (of_find_property(parent_np, "#clock-cells", NULL))
4427 np = parent_np;
4428
4429 return np;
4430 }
4431
4432 /**
4433 * devm_of_clk_add_hw_provider() - Managed clk provider node registration
4434 * @dev: Device acting as the clock provider (used for DT node and lifetime)
4435 * @get: callback for decoding clk_hw
4436 * @data: context pointer for @get callback
4437 *
4438 * Registers clock provider for given device's node. If the device has no DT
4439 * node or if the device node lacks clock provider information (#clock-cells),
4440 * then the parent device's node is scanned for this information. If the parent
4441 * node has #clock-cells, then it is used in registration. The provider is
4442 * automatically released at device exit.
4443 *
4444 * Return: 0 on success or an errno on failure.
4445 */
4446 int devm_of_clk_add_hw_provider(struct device *dev,
4447 struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4448 void *data),
4449 void *data)
4450 {
4451 struct device_node **ptr, *np;
4452 int ret;
4453
4454 ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
4455 GFP_KERNEL);
4456 if (!ptr)
4457 return -ENOMEM;
4458
4459 np = get_clk_provider_node(dev);
4460 ret = of_clk_add_hw_provider(np, get, data);
4461 if (!ret) {
4462 *ptr = np;
4463 devres_add(dev, ptr);
4464 } else {
4465 devres_free(ptr);
4466 }
4467
4468 return ret;
4469 }
4470 EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
4471
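/*
 * A sketch of the MFD sub-device case described above, with hypothetical
 * "foo" names: the sub-device has no DT node of its own, so the call below
 * ends up registering the provider against the parent device's node.
 *
 *	ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
 *					  &foo_hw);
 */
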
4472 /**
4473 * of_clk_del_provider() - Remove a previously registered clock provider
4474 * @np: Device node pointer associated with clock provider
4475 */
4476 void of_clk_del_provider(struct device_node *np)
4477 {
4478 struct of_clk_provider *cp;
4479
4480 mutex_lock(&of_clk_mutex);
4481 list_for_each_entry(cp, &of_clk_providers, link) {
4482 if (cp->node == np) {
4483 list_del(&cp->link);
4484 of_node_put(cp->node);
4485 kfree(cp);
4486 break;
4487 }
4488 }
4489 mutex_unlock(&of_clk_mutex);
4490 }
4491 EXPORT_SYMBOL_GPL(of_clk_del_provider);
4492
4493 static int devm_clk_provider_match(struct device *dev, void *res, void *data)
4494 {
4495 struct device_node **np = res;
4496
4497 if (WARN_ON(!np || !*np))
4498 return 0;
4499
4500 return *np == data;
4501 }
4502
4503 /**
4504 * devm_of_clk_del_provider() - Remove clock provider registered using devm
4505 * @dev: Device to whose lifetime the clock provider was bound
4506 */
4507 void devm_of_clk_del_provider(struct device *dev)
4508 {
4509 int ret;
4510 struct device_node *np = get_clk_provider_node(dev);
4511
4512 ret = devres_release(dev, devm_of_clk_release_provider,
4513 devm_clk_provider_match, np);
4514
4515 WARN_ON(ret);
4516 }
4517 EXPORT_SYMBOL(devm_of_clk_del_provider);
4518
4519 /**
4520 * of_parse_clkspec() - Parse a DT clock specifier for a given device node
4521 * @np: device node to parse clock specifier from
4522 * @index: index of phandle to parse clock out of. If index < 0, @name is used
4523 * @name: clock name to find and parse. If name is NULL, the index is used
4524 * @out_args: Result of parsing the clock specifier
4525 *
4526 * Parses a device node's "clocks" and "clock-names" properties to find the
4527 * phandle and cells for the index or name that is desired. The resulting clock
4528 * specifier is placed into @out_args, or an errno is returned when there's a
4529 * parsing error. The @index argument is ignored if @name is non-NULL.
4530 *
4531 * Example:
4532 *
4533 * phandle1: clock-controller@1 {
4534 * #clock-cells = <2>;
4535 * }
4536 *
4537 * phandle2: clock-controller@2 {
4538 * #clock-cells = <1>;
4539 * }
4540 *
4541 * clock-consumer@3 {
4542 * clocks = <&phandle1 1 2 &phandle2 3>;
4543 * clock-names = "name1", "name2";
4544 * }
4545 *
4546 * To get a device_node for the `clock-controller@2' node, you may call this
4547 * function in a few different ways:
4548 *
4549 * of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
4550 * of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
4551 * of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
4552 *
4553 * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT
4554 * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in
4555 * the "clock-names" property of @np.
4556 */
4557 static int of_parse_clkspec(const struct device_node *np, int index,
4558 const char *name, struct of_phandle_args *out_args)
4559 {
4560 int ret = -ENOENT;
4561
4562 /* Walk up the tree of devices looking for a clock property that matches */
4563 while (np) {
4564 /*
4565 * For named clocks, first look up the name in the
4566 * "clock-names" property. If it cannot be found, then index
4567 * will be an error code and of_parse_phandle_with_args() will
4568 * return -EINVAL.
4569 */
4570 if (name)
4571 index = of_property_match_string(np, "clock-names", name);
4572 ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
4573 index, out_args);
4574 if (!ret)
4575 break;
4576 if (name && index >= 0)
4577 break;
4578
4579 /*
4580 * No matching clock found on this node. If the parent node
4581 * has a "clock-ranges" property, then we can try one of its
4582 * clocks.
4583 */
4584 np = np->parent;
4585 if (np && !of_get_property(np, "clock-ranges", NULL))
4586 break;
4587 index = 0;
4588 }
4589
4590 return ret;
4591 }
4592
4593 static struct clk_hw *
4594 __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
4595 struct of_phandle_args *clkspec)
4596 {
4597 struct clk *clk;
4598
4599 if (provider->get_hw)
4600 return provider->get_hw(clkspec, provider->data);
4601
4602 clk = provider->get(clkspec, provider->data);
4603 if (IS_ERR(clk))
4604 return ERR_CAST(clk);
4605 return __clk_get_hw(clk);
4606 }
4607
4608 static struct clk_hw *
4609 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
4610 {
4611 struct of_clk_provider *provider;
4612 struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
4613
4614 if (!clkspec)
4615 return ERR_PTR(-EINVAL);
4616
4617 mutex_lock(&of_clk_mutex);
4618 list_for_each_entry(provider, &of_clk_providers, link) {
4619 if (provider->node == clkspec->np) {
4620 hw = __of_clk_get_hw_from_provider(provider, clkspec);
4621 if (!IS_ERR(hw))
4622 break;
4623 }
4624 }
4625 mutex_unlock(&of_clk_mutex);
4626
4627 return hw;
4628 }
4629
4630 /**
4631 * of_clk_get_from_provider() - Lookup a clock from a clock provider
4632 * @clkspec: pointer to a clock specifier data structure
4633 *
4634 * This function looks up a struct clk from the registered list of clock
4635 * providers; the input is a clock specifier data structure as returned
4636 * by the of_parse_phandle_with_args() function call.
4637 */
4638 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
4639 {
4640 struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);
4641
4642 return clk_hw_create_clk(NULL, hw, NULL, __func__);
4643 }
4644 EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
4645
4646 struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
4647 const char *con_id)
4648 {
4649 int ret;
4650 struct clk_hw *hw;
4651 struct of_phandle_args clkspec;
4652
4653 ret = of_parse_clkspec(np, index, con_id, &clkspec);
4654 if (ret)
4655 return ERR_PTR(ret);
4656
4657 hw = of_clk_get_hw_from_clkspec(&clkspec);
4658 of_node_put(clkspec.np);
4659
4660 return hw;
4661 }
4662
4663 static struct clk *__of_clk_get(struct device_node *np,
4664 int index, const char *dev_id,
4665 const char *con_id)
4666 {
4667 struct clk_hw *hw = of_clk_get_hw(np, index, con_id);
4668
4669 return clk_hw_create_clk(NULL, hw, dev_id, con_id);
4670 }
4671
4672 struct clk *of_clk_get(struct device_node *np, int index)
4673 {
4674 return __of_clk_get(np, index, np->full_name, NULL);
4675 }
4676 EXPORT_SYMBOL(of_clk_get);
4677
4678 /**
4679 * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
4680 * @np: pointer to clock consumer node
4681 * @name: name of consumer's clock input, or NULL for the first clock reference
4682 *
4683 * This function parses the clocks and clock-names properties,
4684 * and uses them to look up the struct clk from the registered list of clock
4685 * providers.
4686 */
4687 struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
4688 {
4689 if (!np)
4690 return ERR_PTR(-ENOENT);
4691
4692 return __of_clk_get(np, 0, np->full_name, name);
4693 }
4694 EXPORT_SYMBOL(of_clk_get_by_name);
4695
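/*
 * A consumer-side sketch; the node and names below are assumptions:
 *
 *	serial@1000 {
 *		clocks = <&foo_cc 12>, <&foo_cc 13>;
 *		clock-names = "core", "bus";
 *	};
 *
 *	bus_clk = of_clk_get_by_name(np, "bus");
 *
 * resolves the second "clocks" entry through the registered providers, while
 * passing NULL as @name would return the first entry ("core").
 */
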
4696 /**
4697 * of_clk_get_parent_count() - Count the number of clocks a device node has
4698 * @np: device node to count
4699 *
4700 * Returns: The number of clocks that are possible parents of this node
4701 */
4702 unsigned int of_clk_get_parent_count(struct device_node *np)
4703 {
4704 int count;
4705
4706 count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
4707 if (count < 0)
4708 return 0;
4709
4710 return count;
4711 }
4712 EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
4713
4714 const char *of_clk_get_parent_name(struct device_node *np, int index)
4715 {
4716 struct of_phandle_args clkspec;
4717 struct property *prop;
4718 const char *clk_name;
4719 const __be32 *vp;
4720 u32 pv;
4721 int rc;
4722 int count;
4723 struct clk *clk;
4724
4725 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
4726 &clkspec);
4727 if (rc)
4728 return NULL;
4729
4730 index = clkspec.args_count ? clkspec.args[0] : 0;
4731 count = 0;
4732
4733 /* if there is an indices property, use it to transfer the index
4734 * specified into an array offset for the clock-output-names property.
4735 */
4736 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
4737 if (index == pv) {
4738 index = count;
4739 break;
4740 }
4741 count++;
4742 }
4743 /* We went off the end of 'clock-indices' without finding it */
4744 if (prop && !vp)
4745 return NULL;
4746
4747 if (of_property_read_string_index(clkspec.np, "clock-output-names",
4748 index,
4749 &clk_name) < 0) {
4750 /*
4751 * Best effort to get the name if the clock has been
4752 * registered with the framework. If the clock isn't
4753 * registered, we return the node name as the name of
4754 * the clock as long as #clock-cells = 0.
4755 */
4756 clk = of_clk_get_from_provider(&clkspec);
4757 if (IS_ERR(clk)) {
4758 if (clkspec.args_count == 0)
4759 clk_name = clkspec.np->name;
4760 else
4761 clk_name = NULL;
4762 } else {
4763 clk_name = __clk_get_name(clk);
4764 clk_put(clk);
4765 }
4766 }
4767
4768
4769 of_node_put(clkspec.np);
4770 return clk_name;
4771 }
4772 EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
4773
4774 /**
4775 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
4776 * number of parents
4777 * @np: Device node pointer associated with clock provider
4778 * @parents: pointer to char array that hold the parents' names
4779 * @size: size of the @parents array
4780 *
4781 * Return: number of parents for the clock node.
4782 */
4783 int of_clk_parent_fill(struct device_node *np, const char **parents,
4784 unsigned int size)
4785 {
4786 unsigned int i = 0;
4787
4788 while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
4789 i++;
4790
4791 return i;
4792 }
4793 EXPORT_SYMBOL_GPL(of_clk_parent_fill);
4794
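/*
 * A provider-side sketch, assuming a clock with at most two DT parents:
 *
 *	const char *parents[2];
 *	unsigned int num_parents;
 *
 *	num_parents = of_clk_parent_fill(np, parents, ARRAY_SIZE(parents));
 *
 * The filled array can then be handed to clk_init_data::parent_names when
 * the legacy string-based parent description is still in use.
 */
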
4795 struct clock_provider {
4796 void (*clk_init_cb)(struct device_node *);
4797 struct device_node *np;
4798 struct list_head node;
4799 };
4800
4801 /*
4802 * This function looks for a parent clock. If there is one, then it
4803 * checks that the provider for this parent clock was initialized, in
4804 * which case the parent clock will be ready.
4805 */
4806 static int parent_ready(struct device_node *np)
4807 {
4808 int i = 0;
4809
4810 while (true) {
4811 struct clk *clk = of_clk_get(np, i);
4812
4813 /* this parent is ready, we can check the next one */
4814 if (!IS_ERR(clk)) {
4815 clk_put(clk);
4816 i++;
4817 continue;
4818 }
4819
4820 /* at least one parent is not ready, we exit now */
4821 if (PTR_ERR(clk) == -EPROBE_DEFER)
4822 return 0;
4823
4824 /*
4825 * Here we make the assumption that the device tree is
4826 * written correctly. So an error means that there are
4827 * no more parents. As we didn't exit yet, the
4828 * previous parents are ready. If there is no clock
4829 * parent at all, there is no need to wait for one, so
4830 * we can consider its absence as meaning it is ready.
4831 */
4832 return 1;
4833 }
4834 }
4835
4836 /**
4837 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
4838 * @np: Device node pointer associated with clock provider
4839 * @index: clock index
4840 * @flags: pointer to top-level framework flags
4841 *
4842 * Detects if the clock-critical property exists and, if so, sets the
4843 * corresponding CLK_IS_CRITICAL flag.
4844 *
4845 * Do not use this function. It exists only for legacy Device Tree
4846 * bindings, such as the outdated one-clock-per-node style.
4847 * Those bindings typically put all clock data into .dts and the Linux
4848 * driver has no clock data, thus making it impossible to set this flag
4849 * correctly from the driver. Only those drivers may call
4850 * of_clk_detect_critical from their setup functions.
4851 *
4852 * Return: error code or zero on success
4853 */
4854 int of_clk_detect_critical(struct device_node *np,
4855 int index, unsigned long *flags)
4856 {
4857 struct property *prop;
4858 const __be32 *cur;
4859 uint32_t idx;
4860
4861 if (!np || !flags)
4862 return -EINVAL;
4863
4864 of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
4865 if (index == idx)
4866 *flags |= CLK_IS_CRITICAL;
4867
4868 return 0;
4869 }
4870
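/*
 * A sketch of the legacy binding this helper parses; the unit address and
 * indices below are assumptions:
 *
 *	clock-controller@0 {
 *		#clock-cells = <1>;
 *		clock-critical = <0>, <3>;
 *	};
 *
 * A legacy provider would then fold the result into its init flags, e.g.
 * of_clk_detect_critical(np, i, &init.flags) for each output index i.
 */
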
4871 /**
4872 * of_clk_init() - Scan and init clock providers from the DT
4873 * @matches: array of compatible values and init functions for providers.
4874 *
4875 * This function scans the device tree for matching clock providers
4876 * and calls their initialization functions. It does so while trying
4877 * to follow provider dependencies.
4878 */
4879 void __init of_clk_init(const struct of_device_id *matches)
4880 {
4881 const struct of_device_id *match;
4882 struct device_node *np;
4883 struct clock_provider *clk_provider, *next;
4884 bool is_init_done;
4885 bool force = false;
4886 LIST_HEAD(clk_provider_list);
4887
4888 if (!matches)
4889 matches = &__clk_of_table;
4890
4891 /* First prepare the list of the clocks providers */
4892 for_each_matching_node_and_match(np, matches, &match) {
4893 struct clock_provider *parent;
4894
4895 if (!of_device_is_available(np))
4896 continue;
4897
4898 parent = kzalloc(sizeof(*parent), GFP_KERNEL);
4899 if (!parent) {
4900 list_for_each_entry_safe(clk_provider, next,
4901 &clk_provider_list, node) {
4902 list_del(&clk_provider->node);
4903 of_node_put(clk_provider->np);
4904 kfree(clk_provider);
4905 }
4906 of_node_put(np);
4907 return;
4908 }
4909
4910 parent->clk_init_cb = match->data;
4911 parent->np = of_node_get(np);
4912 list_add_tail(&parent->node, &clk_provider_list);
4913 }
4914
4915 while (!list_empty(&clk_provider_list)) {
4916 is_init_done = false;
4917 list_for_each_entry_safe(clk_provider, next,
4918 &clk_provider_list, node) {
4919 if (force || parent_ready(clk_provider->np)) {
4920
4921 /* Don't populate platform devices */
4922 of_node_set_flag(clk_provider->np,
4923 OF_POPULATED);
4924
4925 clk_provider->clk_init_cb(clk_provider->np);
4926 of_clk_set_defaults(clk_provider->np, true);
4927
4928 list_del(&clk_provider->node);
4929 of_node_put(clk_provider->np);
4930 kfree(clk_provider);
4931 is_init_done = true;
4932 }
4933 }
4934
4935 /*
4936 * We didn't manage to initialize any of the
4937 * remaining providers during the last loop, so now we
4938 * initialize all the remaining ones unconditionally
4939 * in case the clock parent was not mandatory
4940 */
4941 if (!is_init_done)
4942 force = true;
4943 }
4944 }
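
/*
 * Early providers hook into this scan with CLK_OF_DECLARE(); a sketch, with
 * a made-up compatible string and init function:
 *
 *	static void __init foo_clk_setup(struct device_node *np)
 *	{
 *		register fixed-rate and other early clocks for np here
 *	}
 *	CLK_OF_DECLARE(foo_clk, "vendor,foo-clock-controller", foo_clk_setup);
 */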
4945 #endif
4946