// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

static struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};

/*** private data structures ***/

struct clk_parent_map {
	const struct clk_hw *hw;
	struct clk_core *core;
	const char *fw_name;
	const char *name;
	int index;
};

struct clk_core {
	const char *name;
	const struct clk_ops *ops;
	struct clk_hw *hw;
	struct module *owner;
	struct device *dev;
	struct device_node *of_node;
	struct clk_core *parent;
	struct clk_parent_map *parents;
	u8 num_parents;
	u8 new_parent_index;
	unsigned long rate;
	unsigned long req_rate;
	unsigned long new_rate;
	struct clk_core *new_parent;
	struct clk_core *new_child;
	unsigned long flags;
	bool orphan;
	bool rpm_enabled;
	unsigned int enable_count;
	unsigned int prepare_count;
	unsigned int protect_count;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned long accuracy;
	int phase;
	struct clk_duty duty;
	struct hlist_head children;
	struct hlist_node child_node;
	struct hlist_head clks;
	unsigned int notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry *dentry;
	struct hlist_node debug_node;
#endif
	struct kref ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core *core;
	struct device *dev;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned int exclusive_count;
	struct hlist_node clks_node;
};

/*** runtime pm ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
	int ret;

	if (!core->rpm_enabled)
		return 0;

	ret = pm_runtime_get_sync(core->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(core->dev);
		return ret;
	}
	return 0;
}

static void clk_pm_runtime_put(struct clk_core *core)
{
	if (!core->rpm_enabled)
		return;

	pm_runtime_put_sync(core->dev);
}

/*** locking ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock. So, in that case, we rely only on
	 * reference counting.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}
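
/*
 * Both locks are reentrant for the task that owns them, which is what lets a
 * clk_ops callback running under the prepare lock call back into the
 * framework. A minimal sketch of the pattern the owner/refcnt pair permits
 * (illustrative only; these helpers are internal to this file):
 *
 *	clk_prepare_lock();	// takes the mutex, prepare_refcnt = 1
 *	clk_prepare_lock();	// same task: prepare_refcnt = 2, no deadlock
 *	clk_prepare_unlock();	// prepare_refcnt = 1, mutex still held
 *	clk_prepare_unlock();	// prepare_refcnt = 0, mutex released
 */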

static bool clk_core_rate_is_protected(struct clk_core *core)
{
	return core->protect_count;
}

static bool clk_core_is_prepared(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare
	 * fall back to software usage counter if it is missing
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate
	 * fall back to software usage counter if .is_enabled is missing
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if clock controller's device is runtime active before
	 * calling .is_enabled callback. If not, assume that clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed.
	 * This function is called mainly from clk_disable_unused_subtree,
	 * which ensures proper runtime pm activation of controller before
	 * taking enable spinlock, but the below check is needed if one tries
	 * to call it from other places.
	 */
	if (core->rpm_enabled) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	ret = core->ops->is_enabled(core->hw);
done:
	if (core->rpm_enabled)
		pm_runtime_put(core->dev);

	return ret;
}

/*** helper functions ***/

const char *__clk_get_name(const struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

#ifdef CONFIG_OF
static int of_parse_clkspec(const struct device_node *np, int index,
			    const char *name, struct of_phandle_args *out_args);
static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
#else
static inline int of_parse_clkspec(const struct device_node *np, int index,
				   const char *name,
				   struct of_phandle_args *out_args)
{
	return -ENOENT;
}
static inline struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif

/**
 * clk_core_get - Find the clk_core parent of a clk
 * @core: clk to find parent of
 * @p_index: parent index to search for
 *
 * This is the preferred method for clk providers to find the parent of a
 * clk when that parent is external to the clk controller. The parent_names
 * array is indexed and treated as a local name matching a string in the device
 * node's 'clock-names' property or as the 'con_id' matching the device's
 * dev_name() in a clk_lookup. This allows clk providers to use their own
 * namespace instead of looking for a globally unique parent string.
 *
 * For example the following DT snippet would allow a clock registered by the
 * clock-controller@c001 that has a clk_init_data::parent_data array
 * with 'xtal' in the 'name' member to find the clock provided by the
 * clock-controller@f00abcd without needing to get the globally unique name of
 * the xtal clk.
 *
 *	parent: clock-controller@f00abcd {
 *		reg = <0xf00abcd 0xabcd>;
 *		#clock-cells = <0>;
 *	};
 *
 *	clock-controller@c001 {
 *		reg = <0xc001 0xf00d>;
 *		clocks = <&parent>;
 *		clock-names = "xtal";
 *		#clock-cells = <1>;
 *	};
 *
 * Returns: -ENOENT when the provider can't be found or the clk doesn't
 * exist in the provider or the name can't be found in the DT node or
 * in a clkdev lookup. NULL when the provider knows about the clk but it
 * isn't provided on this system.
 * A valid clk_core pointer when the clk can be found in the provider.
 */
static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
{
	const char *name = core->parents[p_index].fw_name;
	int index = core->parents[p_index].index;
	struct clk_hw *hw = ERR_PTR(-ENOENT);
	struct device *dev = core->dev;
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct device_node *np = core->of_node;
	struct of_phandle_args clkspec;

	if (np && (name || index >= 0) &&
	    !of_parse_clkspec(np, index, name, &clkspec)) {
		hw = of_clk_get_hw_from_clkspec(&clkspec);
		of_node_put(clkspec.np);
	} else if (name) {
		/*
		 * If the DT search above couldn't find the provider fallback to
		 * looking up via clkdev based clk_lookups.
		 */
		hw = clk_find_hw(dev_id, name);
	}

	if (IS_ERR(hw))
		return ERR_CAST(hw);

	return hw->core;
}
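
/*
 * For reference, a provider matching the DT snippet in the comment above
 * would typically describe the external parent through
 * clk_init_data::parent_data, whose fw_name member is what clk_core_get()
 * matches against 'clock-names'. A minimal sketch (the identifiers below are
 * hypothetical, not taken from this file):
 *
 *	static const struct clk_parent_data xtal_parent[] = {
 *		{ .fw_name = "xtal" },	// matches clock-names = "xtal"
 *	};
 *
 *	static const struct clk_init_data my_gate_init = {
 *		.name = "my_gate",
 *		.ops = &clk_gate_ops,
 *		.parent_data = xtal_parent,
 *		.num_parents = ARRAY_SIZE(xtal_parent),
 *	};
 */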

static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
{
	struct clk_parent_map *entry = &core->parents[index];
	struct clk_core *parent = ERR_PTR(-ENOENT);

	if (entry->hw) {
		parent = entry->hw->core;
		/*
		 * We have a direct reference but it isn't registered yet?
		 * Orphan it and let clk_reparent() update the orphan status
		 * when the parent is registered.
		 */
		if (!parent)
			parent = ERR_PTR(-EPROBE_DEFER);
	} else {
		parent = clk_core_get(core, index);
		if (PTR_ERR(parent) == -ENOENT && entry->name)
			parent = clk_core_lookup(entry->name);
	}

	/* Only cache it if it's not an error */
	if (!IS_ERR(parent))
		entry->core = parent;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
						     u8 index)
{
	if (!core || index >= core->num_parents || !core->parents)
		return NULL;

	if (!core->parents[index].core)
		clk_core_fill_parent_index(core, index);

	return core->parents[index].core;
}

struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
	struct clk_core *parent;

	parent = clk_core_get_parent_by_index(hw->core, index);

	return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
	if (!core)
		return 0;

	if (!core->num_parents || core->parent)
		return core->rate;

	/*
	 * Clk must have a parent because num_parents > 0 but the parent isn't
	 * known yet. Best to return 0 as the rate of this clk until we can
	 * properly recalc the rate based on the parent's rate.
	 */
	return 0;
}

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
	return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
{
	if (!core)
		return 0;

	return core->accuracy;
}

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
	return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
{
	return clk_core_is_prepared(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_prepared);

bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
	return clk_core_rate_is_protected(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);

bool clk_hw_is_enabled(const struct clk_hw *hw)
{
	return clk_core_is_enabled(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_enabled);

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}

int clk_mux_determine_rate_flags(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

out:
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	lockdep_assert_held(&prepare_lock);

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);

/*
 * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
 * @hw: mux type clk to determine rate on
 * @req: rate request, also used to return preferred parent and frequencies
 *
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 *
 * Returns: 0 on success, -EERROR value on error
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
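
/*
 * A mux provider normally wires one of the two helpers above straight into
 * its clk_ops. A minimal sketch, assuming a driver-provided
 * my_mux_set_parent()/my_mux_get_parent() pair (hypothetical names):
 *
 *	static const struct clk_ops my_mux_ops = {
 *		.determine_rate = __clk_mux_determine_rate,
 *		.set_parent = my_mux_set_parent,
 *		.get_parent = my_mux_get_parent,
 *	};
 *
 * Substituting __clk_mux_determine_rate_closest picks the parent whose rate
 * is nearest to the request, rather than the best rate <= request.
 */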

/*** clk api ***/

static void clk_core_rate_unprotect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->protect_count == 0,
	    "%s already unprotected\n", core->name))
		return;

	if (--core->protect_count > 0)
		return;

	clk_core_rate_unprotect(core->parent);
}

static int clk_core_rate_nuke_protect(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return -EINVAL;

	if (core->protect_count == 0)
		return 0;

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);

	return ret;
}

/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up parent chain
 * also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same consumer,
 * the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
 * error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
	if (!clk)
		return;

	clk_prepare_lock();

	/*
	 * if there is something wrong with this consumer protect count, stop
	 * here before messing with the provider
	 */
	if (WARN_ON(clk->exclusive_count <= 0))
		goto out;

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);

static void clk_core_rate_protect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;
}

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (count == 0)
		return;

	clk_core_rate_protect(core);
	core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up parent chain
 * also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same consumer,
 * the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, -EERROR otherwise
 */
int clk_rate_exclusive_get(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_prepare_lock();
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
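
/*
 * Consumer-side usage is a balanced get/put pair bracketing the section that
 * cannot tolerate rate glitches. A hedged sketch (error handling and the clk
 * lookup are assumed to happen elsewhere):
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *	// ... rate-sensitive work; no one else may change the rate ...
 *	clk_rate_exclusive_put(clk);
 *
 * The related clk_set_rate_exclusive() consumer API combines a rate change
 * with claiming exclusivity in one call.
 */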

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->prepare_count == 0,
	    "%s already unprepared\n", core->name))
		return;

	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Unpreparing critical %s\n", core->name))
		return;

	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_unprotect(core);

	if (--core->prepare_count > 0)
		return;

	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	clk_pm_runtime_put(core);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2C. In the complex case a clk gate operation may require a fast and a slow
 * part. It is this reason that clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_pm_runtime_get(core);
		if (ret)
			return ret;

		ret = clk_core_prepare(core->parent);
		if (ret)
			goto runtime_put;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret)
			goto unprepare;
	}

	core->prepare_count++;

	/*
	 * CLK_SET_RATE_GATE is a special case of clock protection
	 * Instead of a consumer claiming exclusive rate control, it is
	 * actually the provider which prevents any consumer from making any
	 * operation which could result in a rate change or rate glitch while
	 * the clock is prepared.
	 */
	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_protect(core);

	return 0;
unprepare:
	clk_core_unprepare(core->parent);
runtime_put:
	clk_pm_runtime_put(core);
	return ret;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is this reason that clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);
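
/*
 * The canonical consumer sequence pairs the sleepable and atomic halves in
 * order: prepare before enable, disable before unprepare. A minimal sketch
 * (the clk is assumed to have been obtained elsewhere, e.g. via clk_get()):
 *
 *	ret = clk_prepare(clk);		// may sleep
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(clk);		// must not sleep
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 *	// ... clock is running ...
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 *
 * clk_prepare_enable()/clk_disable_unprepare() wrap the two steps when the
 * caller can always sleep.
 */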

static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
		return;

	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Disabling critical %s\n", core->name))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable_rcuidle(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete_rcuidle(core);

	clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep. One example is a
 * SoC-internal clk which is controlled via simple register writes. In the
 * complex case a clk gate operation may require a fast and a slow part. It is
 * this reason that clk_unprepare and clk_disable are not mutually exclusive.
 * In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	if (WARN(core->prepare_count == 0,
	    "Enabling unprepared %s\n", core->name))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable_rcuidle(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete_rcuidle(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}

static int clk_core_enable_lock(struct clk_core *core)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);

	return ret;
}

/**
 * clk_gate_restore_context - restore context for poweroff
 * @hw: the clk_hw pointer of clock whose state is to be restored
 *
 * The clock gate restore context function enables or disables
 * the gate clocks based on the enable_count. This is done in cases
 * where the clock context is lost and based on the enable_count
 * the clock either needs to be enabled/disabled. This
 * helps restore the state of gate clocks.
 */
void clk_gate_restore_context(struct clk_hw *hw)
{
	struct clk_core *core = hw->core;

	if (core->enable_count)
		core->ops->enable(hw);
	else
		core->ops->disable(hw);
}
EXPORT_SYMBOL_GPL(clk_gate_restore_context);
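
/*
 * clk_gate_restore_context() is meant to be plugged into a gate clock's
 * clk_ops as the restore_context hook, so the context-restore walk below can
 * re-apply the software enable count to the hardware. A minimal sketch,
 * assuming driver-provided my_gate_enable()/my_gate_disable() (hypothetical
 * names):
 *
 *	static const struct clk_ops my_gate_ops = {
 *		.enable = my_gate_enable,
 *		.disable = my_gate_disable,
 *		.restore_context = clk_gate_restore_context,
 *	};
 */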

static int clk_core_save_context(struct clk_core *core)
{
	struct clk_core *child;
	int ret = 0;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = clk_core_save_context(child);
		if (ret < 0)
			return ret;
	}

	if (core->ops && core->ops->save_context)
		ret = core->ops->save_context(core->hw);

	return ret;
}

static void clk_core_restore_context(struct clk_core *core)
{
	struct clk_core *child;

	if (core->ops && core->ops->restore_context)
		core->ops->restore_context(core->hw);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_restore_context(child);
}

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code. Returns 0 on success.
 */
int clk_save_context(void)
{
	struct clk_core *clk;
	int ret;

	hlist_for_each_entry(clk, &clk_root_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_save_context);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * Restore the saved clock context upon resume.
 *
 */
void clk_restore_context(void)
{
	struct clk_core *core;

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_core_restore_context(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_core_restore_context(core);
}
EXPORT_SYMBOL_GPL(clk_restore_context);
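
/*
 * Platform suspend code is the expected caller of the pair above: save
 * register state before entering a power state that loses it, restore it on
 * the way back. A hedged sketch of syscore-style hooks (the surrounding
 * suspend framework and the function names are assumptions, not from this
 * file):
 *
 *	static int my_platform_suspend(void)	// hypothetical
 *	{
 *		return clk_save_context();
 *	}
 *
 *	static void my_platform_resume(void)	// hypothetical
 *	{
 *		clk_restore_context();
 *	}
 */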

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep. One example is a SoC-internal clk which
 * is controlled via simple register writes. In the complex case a clk ungate
 * operation may require a fast and a slow part. It is this reason that
 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
 * must be called before clk_enable. Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);

static int clk_core_prepare_enable(struct clk_core *core)
{
	int ret;

	ret = clk_core_prepare_lock(core);
	if (ret)
		return ret;

	ret = clk_core_enable_lock(core);
	if (ret)
		clk_core_unprepare_lock(core);

	return ret;
}

static void clk_core_disable_unprepare(struct clk_core *core)
{
	clk_core_disable_lock(core);
	clk_core_unprepare_lock(core);
}

static void __init clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_pm_runtime_get(core))
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}

	clk_pm_runtime_put(core);
}

static void __init clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	if (clk_pm_runtime_get(core))
		goto unprepare_out;

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence. call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
	clk_pm_runtime_put(core);
unprepare_out:
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
}

static bool clk_ignore_unused __initdata;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int __init clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

static int clk_core_determine_round_nolock(struct clk_core *core,
					   struct clk_rate_request *req)
{
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/*
	 * At this point, core protection will be disabled if:
	 * - the provider is not protected at all
	 * - the calling consumer is the only one which has exclusivity
	 *   over the provider
	 */
	if (clk_core_rate_is_protected(core)) {
		req->rate = core->rate;
	} else if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else {
		return -EINVAL;
	}

	return 0;
}

static void clk_core_init_rate_req(struct clk_core * const core,
				   struct clk_rate_request *req)
{
	struct clk_core *parent;

	if (WARN_ON(!core || !req))
		return;

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}
}

static bool clk_core_can_round(struct clk_core * const core)
{
	return core->ops->determine_rate || core->ops->round_rate;
}

static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	lockdep_assert_held(&prepare_lock);

	if (!core) {
		req->rate = 0;
		return 0;
	}

	clk_core_init_rate_req(core, req);

	if (clk_core_can_round(core))
		return clk_core_determine_round_nolock(core, req);
	else if (core->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(core->parent, req);

	req->rate = core->rate;
	return 0;
}

/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: target rate request
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	if (!hw) {
		req->rate = 0;
		return 0;
	}

	return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

/**
 * clk_hw_round_rate() - round the given rate for a hw clk
 * @hw: the hw clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use.
 *
 * Context: prepare_lock must be held.
 *          For clk providers to call from within clk_ops such as .round_rate,
 *          .determine_rate.
 *
 * Return: returns rounded rate of hw clk if clk supports round_rate operation
 *         else returns the parent rate.
 */
unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
	int ret;
	struct clk_rate_request req;

	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(hw->core, &req);
	if (ret)
		return 0;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use which is then returned. If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk_rate_request req;
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(clk->core, &req);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	if (ret)
		return ret;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
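
/*
 * clk_round_rate() lets a consumer ask "what would I actually get?" without
 * changing anything, which is useful before committing with clk_set_rate().
 * A minimal sketch (the target value is arbitrary):
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded > 0 && rounded == 48000000)
 *		ret = clk_set_rate(clk, rounded);
 */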

/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'. Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback. Intended to be called by
 * internal clock code only. Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
			unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
						       &cnd);
			if (ret & NOTIFY_STOP_MASK)
				return ret;
		}
	}

	return ret;
}

/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes. Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (core->parent)
		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
							    parent_accuracy);
	else
		core->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy_recalc(struct clk_core *core)
{
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);

	return clk_core_get_accuracy_no_lock(core);
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
 * issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	long accuracy;

	if (!clk)
		return 0;

	clk_prepare_lock();
	accuracy = clk_core_get_accuracy_recalc(clk->core);
	clk_prepare_unlock();

	return accuracy;
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
{
	unsigned long rate = parent_rate;

	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
		rate = core->ops->recalc_rate(core->hw, parent_rate);
		clk_pm_runtime_put(core);
	}
	return rate;
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes. Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}

static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
{
	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	return clk_core_get_rate_nolock(core);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	if (!clk)
		return 0;

	clk_prepare_lock();
	rate = clk_core_get_rate_recalc(clk->core);
	clk_prepare_unlock();

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
{
	int i;

	if (!parent)
		return -EINVAL;

	for (i = 0; i < core->num_parents; i++) {
		/* Found it first try! */
		if (core->parents[i].core == parent)
			return i;

		/* Something else is here, so keep looking */
		if (core->parents[i].core)
			continue;

		/* Maybe core hasn't been cached but the hw is all we know? */
		if (core->parents[i].hw) {
			if (core->parents[i].hw == parent->hw)
				break;

			/* Didn't match, but we're expecting a clk_hw */
			continue;
		}

		/* Maybe it hasn't been cached (clk_set_parent() path) */
		if (parent == clk_core_get(core, i))
			break;

		/* Fallback to comparing globally unique names */
		if (core->parents[i].name &&
		    !strcmp(parent->name, core->parents[i].name))
			break;
	}

	if (i == core->num_parents)
		return -EINVAL;

	core->parents[i].core = parent;
	return i;
}

/**
 * clk_hw_get_parent_index - return the index of the parent clock
 * @hw: clk_hw associated with the clk being consumed
 *
 * Fetches and returns the index of parent clock. Returns -EINVAL if the given
 * clock does not have a current parent.
 */
int clk_hw_get_parent_index(struct clk_hw *hw)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);

	if (WARN_ON(parent == NULL))
		return -EINVAL;

	return clk_fetch_parent_index(hw->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);
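
/*
 * A provider can call clk_hw_get_parent_index() from its own clk_ops to learn
 * which input is currently selected, e.g. when a rate change needs per-parent
 * handling. A hedged sketch inside a hypothetical .set_rate implementation:
 *
 *	int idx = clk_hw_get_parent_index(hw);
 *
 *	if (idx < 0)
 *		return idx;
 *	// ... program the divider/register bank for input 'idx' ...
 */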
1706
1707 /*
1708 * Update the orphan status of @core and all its children.
1709 */
clk_core_update_orphan_status(struct clk_core * core,bool is_orphan)1710 static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
1711 {
1712 struct clk_core *child;
1713
1714 core->orphan = is_orphan;
1715
1716 hlist_for_each_entry(child, &core->children, child_node)
1717 clk_core_update_orphan_status(child, is_orphan);
1718 }
1719
clk_reparent(struct clk_core * core,struct clk_core * new_parent)1720 static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
1721 {
1722 bool was_orphan = core->orphan;
1723
1724 hlist_del(&core->child_node);
1725
1726 if (new_parent) {
1727 bool becomes_orphan = new_parent->orphan;
1728
1729 /* avoid duplicate POST_RATE_CHANGE notifications */
1730 if (new_parent->new_child == core)
1731 new_parent->new_child = NULL;
1732
1733 hlist_add_head(&core->child_node, &new_parent->children);
1734
1735 if (was_orphan != becomes_orphan)
1736 clk_core_update_orphan_status(core, becomes_orphan);
1737 } else {
1738 hlist_add_head(&core->child_node, &clk_orphan_list);
1739 if (!was_orphan)
1740 clk_core_update_orphan_status(core, true);
1741 }
1742
1743 core->parent = new_parent;
1744 }
1745
__clk_set_parent_before(struct clk_core * core,struct clk_core * parent)1746 static struct clk_core *__clk_set_parent_before(struct clk_core *core,
1747 struct clk_core *parent)
1748 {
1749 unsigned long flags;
1750 struct clk_core *old_parent = core->parent;
1751
1752 /*
1753 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
1754 *
1755 * 2. Migrate prepare state between parents and prevent race with
1756 * clk_enable().
1757 *
1758 * If the clock is not prepared, then a race with
1759 * clk_enable/disable() is impossible since we already have the
1760 * prepare lock (future calls to clk_enable() need to be preceded by
1761 * a clk_prepare()).
1762 *
1763 * If the clock is prepared, migrate the prepared state to the new
1764 * parent and also protect against a race with clk_enable() by
1765 * forcing the clock and the new parent on. This ensures that all
1766 * future calls to clk_enable() are practically NOPs with respect to
1767 * hardware and software states.
1768 *
1769 * See also: Comment for clk_set_parent() below.
1770 */
1771
1772 /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
1773 if (core->flags & CLK_OPS_PARENT_ENABLE) {
1774 clk_core_prepare_enable(old_parent);
1775 clk_core_prepare_enable(parent);
1776 }
1777
1778 /* migrate prepare count if > 0 */
1779 if (core->prepare_count) {
1780 clk_core_prepare_enable(parent);
1781 clk_core_enable_lock(core);
1782 }
1783
1784 /* update the clk tree topology */
1785 flags = clk_enable_lock();
1786 clk_reparent(core, parent);
1787 clk_enable_unlock(flags);
1788
1789 return old_parent;
1790 }
1791
__clk_set_parent_after(struct clk_core * core,struct clk_core * parent,struct clk_core * old_parent)1792 static void __clk_set_parent_after(struct clk_core *core,
1793 struct clk_core *parent,
1794 struct clk_core *old_parent)
1795 {
1796 /*
1797 * Finish the migration of prepare state and undo the changes done
1798 * for preventing a race with clk_enable().
1799 */
1800 if (core->prepare_count) {
1801 clk_core_disable_lock(core);
1802 clk_core_disable_unprepare(old_parent);
1803 }
1804
1805 /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
1806 if (core->flags & CLK_OPS_PARENT_ENABLE) {
1807 clk_core_disable_unprepare(parent);
1808 clk_core_disable_unprepare(old_parent);
1809 }
1810 }
1811
__clk_set_parent(struct clk_core * core,struct clk_core * parent,u8 p_index)1812 static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
1813 u8 p_index)
1814 {
1815 unsigned long flags;
1816 int ret = 0;
1817 struct clk_core *old_parent;
1818
1819 old_parent = __clk_set_parent_before(core, parent);
1820
1821 trace_clk_set_parent(core, parent);
1822
1823 /* change clock input source */
1824 if (parent && core->ops->set_parent)
1825 ret = core->ops->set_parent(core->hw, p_index);
1826
1827 trace_clk_set_parent_complete(core, parent);
1828
1829 if (ret) {
1830 flags = clk_enable_lock();
1831 clk_reparent(core, old_parent);
1832 clk_enable_unlock(flags);
1833 __clk_set_parent_after(core, old_parent, parent);
1834
1835 return ret;
1836 }
1837
1838 __clk_set_parent_after(core, parent, old_parent);
1839
1840 return 0;
1841 }
1842
1843 /**
1844 * __clk_speculate_rates
1845 * @core: first clk in the subtree
1846 * @parent_rate: the "future" rate of clk's parent
1847 *
1848 * Walks the subtree of clks starting with clk, speculating rates as it
1849 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1850 *
1851 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
1852 * pre-rate change notifications and returns early if no clks in the
1853 * subtree have subscribed to the notifications. Note that if a clk does not
1854 * implement the .recalc_rate callback then it is assumed that the clock will
1855 * take on the rate of its parent.
1856 */
__clk_speculate_rates(struct clk_core * core,unsigned long parent_rate)1857 static int __clk_speculate_rates(struct clk_core *core,
1858 unsigned long parent_rate)
1859 {
1860 struct clk_core *child;
1861 unsigned long new_rate;
1862 int ret = NOTIFY_DONE;
1863
1864 lockdep_assert_held(&prepare_lock);
1865
1866 new_rate = clk_recalc(core, parent_rate);
1867
1868 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
1869 if (core->notifier_count)
1870 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
1871
1872 if (ret & NOTIFY_STOP_MASK) {
1873 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1874 __func__, core->name, ret);
1875 goto out;
1876 }
1877
1878 hlist_for_each_entry(child, &core->children, child_node) {
1879 ret = __clk_speculate_rates(child, new_rate);
1880 if (ret & NOTIFY_STOP_MASK)
1881 break;
1882 }
1883
1884 out:
1885 return ret;
1886 }
1887
clk_calc_subtree(struct clk_core * core,unsigned long new_rate,struct clk_core * new_parent,u8 p_index)1888 static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
1889 struct clk_core *new_parent, u8 p_index)
1890 {
1891 struct clk_core *child;
1892
1893 core->new_rate = new_rate;
1894 core->new_parent = new_parent;
1895 core->new_parent_index = p_index;
1896 /* include clk in new parent's PRE_RATE_CHANGE notifications */
1897 core->new_child = NULL;
1898 if (new_parent && new_parent != core->parent)
1899 new_parent->new_child = core;
1900
1901 hlist_for_each_entry(child, &core->children, child_node) {
1902 child->new_rate = clk_recalc(child, new_rate);
1903 clk_calc_subtree(child, child->new_rate, NULL, 0);
1904 }
1905 }
1906
1907 /*
1908 * calculate the new rates returning the topmost clock that has to be
1909 * changed.
1910 */
1911 static struct clk_core *clk_calc_new_rates(struct clk_core *core,
1912 unsigned long rate)
1913 {
1914 struct clk_core *top = core;
1915 struct clk_core *old_parent, *parent;
1916 unsigned long best_parent_rate = 0;
1917 unsigned long new_rate;
1918 unsigned long min_rate;
1919 unsigned long max_rate;
1920 int p_index = 0;
1921 long ret;
1922
1923 /* sanity */
1924 if (IS_ERR_OR_NULL(core))
1925 return NULL;
1926
1927 /* save parent rate, if it exists */
1928 parent = old_parent = core->parent;
1929 if (parent)
1930 best_parent_rate = parent->rate;
1931
1932 clk_core_get_boundaries(core, &min_rate, &max_rate);
1933
1934 /* find the closest rate and parent clk/rate */
1935 if (clk_core_can_round(core)) {
1936 struct clk_rate_request req;
1937
1938 req.rate = rate;
1939 req.min_rate = min_rate;
1940 req.max_rate = max_rate;
1941
1942 clk_core_init_rate_req(core, &req);
1943
1944 ret = clk_core_determine_round_nolock(core, &req);
1945 if (ret < 0)
1946 return NULL;
1947
1948 best_parent_rate = req.best_parent_rate;
1949 new_rate = req.rate;
1950 parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
1951
1952 if (new_rate < min_rate || new_rate > max_rate)
1953 return NULL;
1954 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
1955 /* pass-through clock without adjustable parent */
1956 core->new_rate = core->rate;
1957 return NULL;
1958 } else {
1959 /* pass-through clock with adjustable parent */
1960 top = clk_calc_new_rates(parent, rate);
1961 new_rate = parent->new_rate;
1962 goto out;
1963 }
1964
1965 /* some clocks must be gated to change parent */
1966 if (parent != old_parent &&
1967 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
1968 pr_debug("%s: %s not gated but wants to reparent\n",
1969 __func__, core->name);
1970 return NULL;
1971 }
1972
1973 /* try finding the new parent index */
1974 if (parent && core->num_parents > 1) {
1975 p_index = clk_fetch_parent_index(core, parent);
1976 if (p_index < 0) {
1977 pr_debug("%s: clk %s can not be parent of clk %s\n",
1978 __func__, parent->name, core->name);
1979 return NULL;
1980 }
1981 }
1982
1983 if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
1984 best_parent_rate != parent->rate)
1985 top = clk_calc_new_rates(parent, best_parent_rate);
1986
1987 out:
1988 clk_calc_subtree(core, new_rate, parent, p_index);
1989
1990 return top;
1991 }
1992
1993 /*
1994 * Notify about rate changes in a subtree. Always walk down the whole tree
1995 * so that in case of an error we can walk down the whole tree again and
1996 * abort the change.
1997 */
1998 static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
1999 unsigned long event)
2000 {
2001 struct clk_core *child, *tmp_clk, *fail_clk = NULL;
2002 int ret = NOTIFY_DONE;
2003
2004 if (core->rate == core->new_rate)
2005 return NULL;
2006
2007 if (core->notifier_count) {
2008 ret = __clk_notify(core, event, core->rate, core->new_rate);
2009 if (ret & NOTIFY_STOP_MASK)
2010 fail_clk = core;
2011 }
2012
2013 hlist_for_each_entry(child, &core->children, child_node) {
2014 /* Skip children who will be reparented to another clock */
2015 if (child->new_parent && child->new_parent != core)
2016 continue;
2017 tmp_clk = clk_propagate_rate_change(child, event);
2018 if (tmp_clk)
2019 fail_clk = tmp_clk;
2020 }
2021
2022 /* handle the new child who might not be in core->children yet */
2023 if (core->new_child) {
2024 tmp_clk = clk_propagate_rate_change(core->new_child, event);
2025 if (tmp_clk)
2026 fail_clk = tmp_clk;
2027 }
2028
2029 return fail_clk;
2030 }
2031
2032 /*
2033 * walk down a subtree and set the new rates notifying the rate
2034 * change on the way
2035 */
2036 static void clk_change_rate(struct clk_core *core)
2037 {
2038 struct clk_core *child;
2039 struct hlist_node *tmp;
2040 unsigned long old_rate;
2041 unsigned long best_parent_rate = 0;
2042 bool skip_set_rate = false;
2043 struct clk_core *old_parent;
2044 struct clk_core *parent = NULL;
2045
2046 old_rate = core->rate;
2047
2048 if (core->new_parent) {
2049 parent = core->new_parent;
2050 best_parent_rate = core->new_parent->rate;
2051 } else if (core->parent) {
2052 parent = core->parent;
2053 best_parent_rate = core->parent->rate;
2054 }
2055
2056 if (clk_pm_runtime_get(core))
2057 return;
2058
2059 if (core->flags & CLK_SET_RATE_UNGATE) {
2060 unsigned long flags;
2061
2062 clk_core_prepare(core);
2063 flags = clk_enable_lock();
2064 clk_core_enable(core);
2065 clk_enable_unlock(flags);
2066 }
2067
2068 if (core->new_parent && core->new_parent != core->parent) {
2069 old_parent = __clk_set_parent_before(core, core->new_parent);
2070 trace_clk_set_parent(core, core->new_parent);
2071
2072 if (core->ops->set_rate_and_parent) {
2073 skip_set_rate = true;
2074 core->ops->set_rate_and_parent(core->hw, core->new_rate,
2075 best_parent_rate,
2076 core->new_parent_index);
2077 } else if (core->ops->set_parent) {
2078 core->ops->set_parent(core->hw, core->new_parent_index);
2079 }
2080
2081 trace_clk_set_parent_complete(core, core->new_parent);
2082 __clk_set_parent_after(core, core->new_parent, old_parent);
2083 }
2084
2085 if (core->flags & CLK_OPS_PARENT_ENABLE)
2086 clk_core_prepare_enable(parent);
2087
2088 trace_clk_set_rate(core, core->new_rate);
2089
2090 if (!skip_set_rate && core->ops->set_rate)
2091 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
2092
2093 trace_clk_set_rate_complete(core, core->new_rate);
2094
2095 core->rate = clk_recalc(core, best_parent_rate);
2096
2097 if (core->flags & CLK_SET_RATE_UNGATE) {
2098 unsigned long flags;
2099
2100 flags = clk_enable_lock();
2101 clk_core_disable(core);
2102 clk_enable_unlock(flags);
2103 clk_core_unprepare(core);
2104 }
2105
2106 if (core->flags & CLK_OPS_PARENT_ENABLE)
2107 clk_core_disable_unprepare(parent);
2108
2109 if (core->notifier_count && old_rate != core->rate)
2110 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
2111
2112 if (core->flags & CLK_RECALC_NEW_RATES)
2113 (void)clk_calc_new_rates(core, core->new_rate);
2114
2115 /*
2116 * Use safe iteration, as change_rate can actually swap parents
2117 * for certain clock types.
2118 */
2119 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
2120 /* Skip children who will be reparented to another clock */
2121 if (child->new_parent && child->new_parent != core)
2122 continue;
2123 clk_change_rate(child);
2124 }
2125
2126 /* handle the new child who might not be in core->children yet */
2127 if (core->new_child)
2128 clk_change_rate(core->new_child);
2129
2130 clk_pm_runtime_put(core);
2131 }
2132
2133 static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
2134 unsigned long req_rate)
2135 {
2136 int ret, cnt;
2137 struct clk_rate_request req;
2138
2139 lockdep_assert_held(&prepare_lock);
2140
2141 if (!core)
2142 return 0;
2143
2144 /* simulate what the rate would be if it could be freely set */
2145 cnt = clk_core_rate_nuke_protect(core);
2146 if (cnt < 0)
2147 return cnt;
2148
2149 clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
2150 req.rate = req_rate;
2151
2152 ret = clk_core_round_rate_nolock(core, &req);
2153
2154 /* restore the protection */
2155 clk_core_rate_restore_protect(core, cnt);
2156
2157 return ret ? 0 : req.rate;
2158 }
2159
2160 static int clk_core_set_rate_nolock(struct clk_core *core,
2161 unsigned long req_rate)
2162 {
2163 struct clk_core *top, *fail_clk;
2164 unsigned long rate;
2165 int ret = 0;
2166
2167 if (!core)
2168 return 0;
2169
2170 rate = clk_core_req_round_rate_nolock(core, req_rate);
2171
2172 /* bail early if nothing to do */
2173 if (rate == clk_core_get_rate_nolock(core))
2174 return 0;
2175
2176 /* fail on a direct rate set of a protected provider */
2177 if (clk_core_rate_is_protected(core))
2178 return -EBUSY;
2179
2180 /* calculate new rates and get the topmost changed clock */
2181 top = clk_calc_new_rates(core, req_rate);
2182 if (!top)
2183 return -EINVAL;
2184
2185 ret = clk_pm_runtime_get(core);
2186 if (ret)
2187 return ret;
2188
2189 /* notify that we are about to change rates */
2190 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
2191 if (fail_clk) {
2192 pr_debug("%s: failed to set %s rate\n", __func__,
2193 fail_clk->name);
2194 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
2195 ret = -EBUSY;
2196 goto err;
2197 }
2198
2199 /* change the rates */
2200 clk_change_rate(top);
2201
2202 core->req_rate = req_rate;
2203 err:
2204 clk_pm_runtime_put(core);
2205
2206 return ret;
2207 }
2208
2209 /**
2210 * clk_set_rate - specify a new rate for clk
2211 * @clk: the clk whose rate is being changed
2212 * @rate: the new rate for clk
2213 *
2214 * In the simplest case clk_set_rate will only adjust the rate of clk.
2215 *
2216 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
2217 * propagate up to clk's parent; whether or not this happens depends on the
2218 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
2219 * after calling .round_rate then upstream parent propagation is ignored. If
2220 * *parent_rate comes back with a new rate for clk's parent then we propagate
2221 * up to clk's parent and set its rate. Upward propagation will continue
2222 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
2223 * .round_rate stops requesting changes to clk's parent_rate.
2224 *
2225 * Rate changes are accomplished via tree traversal that also recalculates the
2226 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
2227 *
2228 * Returns 0 on success, a negative errno otherwise.
2229 */
2230 int clk_set_rate(struct clk *clk, unsigned long rate)
2231 {
2232 int ret;
2233
2234 if (!clk)
2235 return 0;
2236
2237 /* prevent racing with updates to the clock topology */
2238 clk_prepare_lock();
2239
2240 if (clk->exclusive_count)
2241 clk_core_rate_unprotect(clk->core);
2242
2243 ret = clk_core_set_rate_nolock(clk->core, rate);
2244
2245 if (clk->exclusive_count)
2246 clk_core_rate_protect(clk->core);
2247
2248 clk_prepare_unlock();
2249
2250 return ret;
2251 }
2252 EXPORT_SYMBOL_GPL(clk_set_rate);
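
/*
 * Illustrative sketch (not part of this file): typical consumer usage
 * of the API above, rounding first to see what the hardware can do.
 * The "pixel" con_id and the 148.5 MHz target are hypothetical.
 *
 *	struct clk *clk = devm_clk_get(dev, "pixel");
 *	long rounded;
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	rounded = clk_round_rate(clk, 148500000);
 *	if (rounded <= 0)
 *		return -EINVAL;
 *
 *	return clk_set_rate(clk, rounded);
 */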
2253
2254 /**
2255 * clk_set_rate_exclusive - specify a new rate and get exclusive control
2256 * @clk: the clk whose rate is being changed
2257 * @rate: the new rate for clk
2258 *
2259 * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
2260 * within a critical section
2261 *
2262 * This can be used initially to ensure that at least one consumer is
2263 * satisfied when several consumers are competing for exclusivity over the
2264 * same clock provider.
2265 *
2266 * The exclusivity is not applied if setting the rate failed.
2267 *
2268 * Calls to clk_rate_exclusive_get() should be balanced with calls to
2269 * clk_rate_exclusive_put().
2270 *
2271 * Returns 0 on success, a negative errno otherwise.
2272 */
2273 int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
2274 {
2275 int ret;
2276
2277 if (!clk)
2278 return 0;
2279
2280 /* prevent racing with updates to the clock topology */
2281 clk_prepare_lock();
2282
2283 /*
2284 * The temporary protection removal is not done here, on purpose:
2285 * this function is meant to be used instead of clk_rate_protect,
2286 * so the consumer code path protects the clock provider through it.
2287 */
2288
2289 ret = clk_core_set_rate_nolock(clk->core, rate);
2290 if (!ret) {
2291 clk_core_rate_protect(clk->core);
2292 clk->exclusive_count++;
2293 }
2294
2295 clk_prepare_unlock();
2296
2297 return ret;
2298 }
2299 EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
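
/*
 * Illustrative sketch (not part of this file): balancing the exclusive
 * get above with clk_rate_exclusive_put(), as the kernel-doc requires.
 * The clk handle, the 100 MHz rate and the work function are made up.
 *
 *	ret = clk_set_rate_exclusive(clk, 100000000);
 *	if (ret)
 *		return ret;
 *
 *	do_rate_sensitive_work();
 *
 *	clk_rate_exclusive_put(clk);
 */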
2300
2301 /**
2302 * clk_set_rate_range - set a rate range for a clock source
2303 * @clk: clock source
2304 * @min: desired minimum clock rate in Hz, inclusive
2305 * @max: desired maximum clock rate in Hz, inclusive
2306 *
2307 * Returns success (0) or negative errno.
2308 */
2309 int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
2310 {
2311 int ret = 0;
2312 unsigned long old_min, old_max, rate;
2313
2314 if (!clk)
2315 return 0;
2316
2317 if (min > max) {
2318 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
2319 __func__, clk->core->name, clk->dev_id, clk->con_id,
2320 min, max);
2321 return -EINVAL;
2322 }
2323
2324 clk_prepare_lock();
2325
2326 if (clk->exclusive_count)
2327 clk_core_rate_unprotect(clk->core);
2328
2329 /* Save the current values in case we need to rollback the change */
2330 old_min = clk->min_rate;
2331 old_max = clk->max_rate;
2332 clk->min_rate = min;
2333 clk->max_rate = max;
2334
2335 rate = clk_core_get_rate_nolock(clk->core);
2336 if (rate < min || rate > max) {
2337 /*
2338 * FIXME:
2339 * We are in a bit of trouble here: the current rate is outside
2340 * the requested range. We are going to try to request the
2341 * appropriate range boundary, but there is a catch. It may fail
2342 * for the usual reasons (clock broken, clock protected, etc.) but
2343 * also because:
2344 * - round_rate() was not favorable and fell on the wrong
2345 * side of the boundary
2346 * - the determine_rate() callback does not really check for
2347 * this corner case when determining the rate
2348 */
2349
2350 if (rate < min)
2351 rate = min;
2352 else
2353 rate = max;
2354
2355 ret = clk_core_set_rate_nolock(clk->core, rate);
2356 if (ret) {
2357 /* rollback the changes */
2358 clk->min_rate = old_min;
2359 clk->max_rate = old_max;
2360 }
2361 }
2362
2363 if (clk->exclusive_count)
2364 clk_core_rate_protect(clk->core);
2365
2366 clk_prepare_unlock();
2367
2368 return ret;
2369 }
2370 EXPORT_SYMBOL_GPL(clk_set_rate_range);
2371
2372 /**
2373 * clk_set_min_rate - set a minimum clock rate for a clock source
2374 * @clk: clock source
2375 * @rate: desired minimum clock rate in Hz, inclusive
2376 *
2377 * Returns success (0) or negative errno.
2378 */
2379 int clk_set_min_rate(struct clk *clk, unsigned long rate)
2380 {
2381 if (!clk)
2382 return 0;
2383
2384 return clk_set_rate_range(clk, rate, clk->max_rate);
2385 }
2386 EXPORT_SYMBOL_GPL(clk_set_min_rate);
2387
2388 /**
2389 * clk_set_max_rate - set a maximum clock rate for a clock source
2390 * @clk: clock source
2391 * @rate: desired maximum clock rate in Hz, inclusive
2392 *
2393 * Returns success (0) or negative errno.
2394 */
2395 int clk_set_max_rate(struct clk *clk, unsigned long rate)
2396 {
2397 if (!clk)
2398 return 0;
2399
2400 return clk_set_rate_range(clk, clk->min_rate, rate);
2401 }
2402 EXPORT_SYMBOL_GPL(clk_set_max_rate);
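
/*
 * Illustrative sketch (not part of this file): the 50-200 MHz window
 * is hypothetical. A single call
 *
 *	ret = clk_set_rate_range(clk, 50000000, 200000000);
 *
 * is roughly equivalent to
 *
 *	ret = clk_set_min_rate(clk, 50000000);
 *	if (!ret)
 *		ret = clk_set_max_rate(clk, 200000000);
 *
 * except that the combined form validates and applies both bounds in
 * one prepare_lock critical section.
 */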
2403
2404 /**
2405 * clk_get_parent - return the parent of a clk
2406 * @clk: the clk whose parent gets returned
2407 *
2408 * Simply returns the parent of clk. Returns NULL if clk is NULL or has
2409 2409 * no parent.
2409 */
2410 struct clk *clk_get_parent(struct clk *clk)
2411 {
2412 struct clk *parent;
2413
2414 if (!clk)
2415 return NULL;
2416
2417 clk_prepare_lock();
2418 /* TODO: Create a per-user clk and change callers to call clk_put */
2419 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
2420 clk_prepare_unlock();
2421
2422 return parent;
2423 }
2424 EXPORT_SYMBOL_GPL(clk_get_parent);
2425
2426 static struct clk_core *__clk_init_parent(struct clk_core *core)
2427 {
2428 u8 index = 0;
2429
2430 if (core->num_parents > 1 && core->ops->get_parent)
2431 index = core->ops->get_parent(core->hw);
2432
2433 return clk_core_get_parent_by_index(core, index);
2434 }
2435
2436 static void clk_core_reparent(struct clk_core *core,
2437 struct clk_core *new_parent)
2438 {
2439 clk_reparent(core, new_parent);
2440 __clk_recalc_accuracies(core);
2441 __clk_recalc_rates(core, POST_RATE_CHANGE);
2442 }
2443
2444 void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
2445 {
2446 if (!hw)
2447 return;
2448
2449 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
2450 }
2451
2452 /**
2453 * clk_has_parent - check if a clock is a possible parent for another
2454 * @clk: clock source
2455 * @parent: parent clock source
2456 *
2457 * This function can be used in drivers that need to check that a clock can be
2458 * the parent of another without actually changing the parent.
2459 *
2460 * Returns true if @parent is a possible parent for @clk, false otherwise.
2461 */
2462 bool clk_has_parent(struct clk *clk, struct clk *parent)
2463 {
2464 struct clk_core *core, *parent_core;
2465 int i;
2466
2467 /* NULL clocks should be nops, so return success if either is NULL. */
2468 if (!clk || !parent)
2469 return true;
2470
2471 core = clk->core;
2472 parent_core = parent->core;
2473
2474 /* Optimize for the case where the parent is already the parent. */
2475 if (core->parent == parent_core)
2476 return true;
2477
2478 for (i = 0; i < core->num_parents; i++)
2479 if (!strcmp(core->parents[i].name, parent_core->name))
2480 return true;
2481
2482 return false;
2483 }
2484 EXPORT_SYMBOL_GPL(clk_has_parent);
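
/*
 * Illustrative sketch (not part of this file): validating a candidate
 * parent before switching to it. "mux" and "pll_b" are hypothetical
 * handles obtained via clk_get().
 *
 *	if (!clk_has_parent(mux, pll_b))
 *		return -EINVAL;
 *
 *	return clk_set_parent(mux, pll_b);
 */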
2485
2486 static int clk_core_set_parent_nolock(struct clk_core *core,
2487 struct clk_core *parent)
2488 {
2489 int ret = 0;
2490 int p_index = 0;
2491 unsigned long p_rate = 0;
2492
2493 lockdep_assert_held(&prepare_lock);
2494
2495 if (!core)
2496 return 0;
2497
2498 if (core->parent == parent)
2499 return 0;
2500
2501 /* verify ops for multi-parent clks */
2502 if (core->num_parents > 1 && !core->ops->set_parent)
2503 return -EPERM;
2504
2505 /* check that we are allowed to re-parent if the clock is in use */
2506 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
2507 return -EBUSY;
2508
2509 if (clk_core_rate_is_protected(core))
2510 return -EBUSY;
2511
2512 /* try finding the new parent index */
2513 if (parent) {
2514 p_index = clk_fetch_parent_index(core, parent);
2515 if (p_index < 0) {
2516 pr_debug("%s: clk %s can not be parent of clk %s\n",
2517 __func__, parent->name, core->name);
2518 return p_index;
2519 }
2520 p_rate = parent->rate;
2521 }
2522
2523 ret = clk_pm_runtime_get(core);
2524 if (ret)
2525 return ret;
2526
2527 /* propagate PRE_RATE_CHANGE notifications */
2528 ret = __clk_speculate_rates(core, p_rate);
2529
2530 /* abort if a driver objects */
2531 if (ret & NOTIFY_STOP_MASK)
2532 goto runtime_put;
2533
2534 /* do the re-parent */
2535 ret = __clk_set_parent(core, parent, p_index);
2536
2537 /* propagate rate and accuracy recalculation accordingly */
2538 if (ret) {
2539 __clk_recalc_rates(core, ABORT_RATE_CHANGE);
2540 } else {
2541 __clk_recalc_rates(core, POST_RATE_CHANGE);
2542 __clk_recalc_accuracies(core);
2543 }
2544
2545 runtime_put:
2546 clk_pm_runtime_put(core);
2547
2548 return ret;
2549 }
2550
2551 int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
2552 {
2553 return clk_core_set_parent_nolock(hw->core, parent->core);
2554 }
2555 EXPORT_SYMBOL_GPL(clk_hw_set_parent);
2556
2557 /**
2558 * clk_set_parent - switch the parent of a mux clk
2559 * @clk: the mux clk whose input we are switching
2560 * @parent: the new input to clk
2561 *
2562 * Re-parent clk to use parent as its new input source. If clk is in
2563 * prepared state, the clk will get enabled for the duration of this call. If
2564 * that's not acceptable for a specific clk (e.g. the consumer can't handle
2565 * that, the reparenting is glitchy in hardware, etc), use the
2566 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
2567 *
2568 * After successfully changing clk's parent clk_set_parent will update the
2569 * clk topology, sysfs topology and propagate rate recalculation via
2570 * __clk_recalc_rates.
2571 *
2572 * Returns 0 on success, a negative errno otherwise.
2573 */
2574 int clk_set_parent(struct clk *clk, struct clk *parent)
2575 {
2576 int ret;
2577
2578 if (!clk)
2579 return 0;
2580
2581 clk_prepare_lock();
2582
2583 if (clk->exclusive_count)
2584 clk_core_rate_unprotect(clk->core);
2585
2586 ret = clk_core_set_parent_nolock(clk->core,
2587 parent ? parent->core : NULL);
2588
2589 if (clk->exclusive_count)
2590 clk_core_rate_protect(clk->core);
2591
2592 clk_prepare_unlock();
2593
2594 return ret;
2595 }
2596 EXPORT_SYMBOL_GPL(clk_set_parent);
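
/*
 * Illustrative sketch (not part of this file): switching a mux and
 * reading the selection back. "mux" and "pll_a" are hypothetical;
 * clk_is_match() is used for the comparison because clk_get_parent()
 * may return a different struct clk handle for the same hardware
 * clock.
 *
 *	ret = clk_set_parent(mux, pll_a);
 *	if (ret)
 *		return ret;
 *
 *	WARN_ON(!clk_is_match(clk_get_parent(mux), pll_a));
 */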
2597
2598 static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
2599 {
2600 int ret = -EINVAL;
2601
2602 lockdep_assert_held(&prepare_lock);
2603
2604 if (!core)
2605 return 0;
2606
2607 if (clk_core_rate_is_protected(core))
2608 return -EBUSY;
2609
2610 trace_clk_set_phase(core, degrees);
2611
2612 if (core->ops->set_phase) {
2613 ret = core->ops->set_phase(core->hw, degrees);
2614 if (!ret)
2615 core->phase = degrees;
2616 }
2617
2618 trace_clk_set_phase_complete(core, degrees);
2619
2620 return ret;
2621 }
2622
2623 /**
2624 * clk_set_phase - adjust the phase shift of a clock signal
2625 * @clk: clock signal source
2626 * @degrees: number of degrees the signal is shifted
2627 *
2628 * Shifts the phase of a clock signal by the specified
2629 * degrees. Returns 0 on success, a negative errno otherwise.
2630 *
2631 * This function makes no distinction about the input or reference
2632 * signal that we adjust the clock signal phase against. For example
2633 * phase locked-loop clock signal generators we may shift phase with
2634 * respect to feedback clock signal input, but for other cases the
2635 * clock phase may be shifted with respect to some other, unspecified
2636 * signal.
2637 *
2638 * Additionally the concept of phase shift does not propagate through
2639 * the clock tree hierarchy, which sets it apart from clock rates and
2640 * clock accuracy. A parent clock phase attribute does not have an
2641 * impact on the phase attribute of a child clock.
2642 */
2643 int clk_set_phase(struct clk *clk, int degrees)
2644 {
2645 int ret;
2646
2647 if (!clk)
2648 return 0;
2649
2650 /* sanity check degrees */
2651 degrees %= 360;
2652 if (degrees < 0)
2653 degrees += 360;
2654
2655 clk_prepare_lock();
2656
2657 if (clk->exclusive_count)
2658 clk_core_rate_unprotect(clk->core);
2659
2660 ret = clk_core_set_phase_nolock(clk->core, degrees);
2661
2662 if (clk->exclusive_count)
2663 clk_core_rate_protect(clk->core);
2664
2665 clk_prepare_unlock();
2666
2667 return ret;
2668 }
2669 EXPORT_SYMBOL_GPL(clk_set_phase);
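
/*
 * Illustrative sketch (not part of this file): shifting a hypothetical
 * sampling clock by 90 degrees and, on hardware that reports back the
 * programmed value, verifying it via clk_get_phase().
 *
 *	ret = clk_set_phase(sample_clk, 90);
 *	if (!ret)
 *		WARN_ON(clk_get_phase(sample_clk) != 90);
 */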
2670
2671 static int clk_core_get_phase(struct clk_core *core)
2672 {
2673 int ret;
2674
2675 lockdep_assert_held(&prepare_lock);
2676 if (!core->ops->get_phase)
2677 return 0;
2678
2679 /* Always try to update cached phase if possible */
2680 ret = core->ops->get_phase(core->hw);
2681 if (ret >= 0)
2682 core->phase = ret;
2683
2684 return ret;
2685 }
2686
2687 /**
2688 * clk_get_phase - return the phase shift of a clock signal
2689 * @clk: clock signal source
2690 *
2691 * Returns the phase shift of a clock node in degrees, otherwise returns
2692 * a negative errno.
2693 */
2694 int clk_get_phase(struct clk *clk)
2695 {
2696 int ret;
2697
2698 if (!clk)
2699 return 0;
2700
2701 clk_prepare_lock();
2702 ret = clk_core_get_phase(clk->core);
2703 clk_prepare_unlock();
2704
2705 return ret;
2706 }
2707 EXPORT_SYMBOL_GPL(clk_get_phase);
2708
2709 static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
2710 {
2711 /* Assume a default value of 50% */
2712 core->duty.num = 1;
2713 core->duty.den = 2;
2714 }
2715
2716 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
2717
2718 static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
2719 {
2720 struct clk_duty *duty = &core->duty;
2721 int ret = 0;
2722
2723 if (!core->ops->get_duty_cycle)
2724 return clk_core_update_duty_cycle_parent_nolock(core);
2725
2726 ret = core->ops->get_duty_cycle(core->hw, duty);
2727 if (ret)
2728 goto reset;
2729
2730 /* Don't trust the clock provider too much */
2731 if (duty->den == 0 || duty->num > duty->den) {
2732 ret = -EINVAL;
2733 goto reset;
2734 }
2735
2736 return 0;
2737
2738 reset:
2739 clk_core_reset_duty_cycle_nolock(core);
2740 return ret;
2741 }
2742
2743 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
2744 {
2745 int ret = 0;
2746
2747 if (core->parent &&
2748 core->flags & CLK_DUTY_CYCLE_PARENT) {
2749 ret = clk_core_update_duty_cycle_nolock(core->parent);
2750 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2751 } else {
2752 clk_core_reset_duty_cycle_nolock(core);
2753 }
2754
2755 return ret;
2756 }
2757
2758 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2759 struct clk_duty *duty);
2760
2761 static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
2762 struct clk_duty *duty)
2763 {
2764 int ret;
2765
2766 lockdep_assert_held(&prepare_lock);
2767
2768 if (clk_core_rate_is_protected(core))
2769 return -EBUSY;
2770
2771 trace_clk_set_duty_cycle(core, duty);
2772
2773 if (!core->ops->set_duty_cycle)
2774 return clk_core_set_duty_cycle_parent_nolock(core, duty);
2775
2776 ret = core->ops->set_duty_cycle(core->hw, duty);
2777 if (!ret)
2778 memcpy(&core->duty, duty, sizeof(*duty));
2779
2780 trace_clk_set_duty_cycle_complete(core, duty);
2781
2782 return ret;
2783 }
2784
2785 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2786 struct clk_duty *duty)
2787 {
2788 int ret = 0;
2789
2790 if (core->parent &&
2791 core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
2792 ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
2793 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2794 }
2795
2796 return ret;
2797 }
2798
2799 /**
2800 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
2801 * @clk: clock signal source
2802 * @num: numerator of the duty cycle ratio to be applied
2803 * @den: denominator of the duty cycle ratio to be applied
2804 *
2805 * Apply the duty cycle ratio if the ratio is valid and the clock can
2806 * perform this operation.
2807 *
2808 * Returns (0) on success, a negative errno otherwise.
2809 */
2810 int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
2811 {
2812 int ret;
2813 struct clk_duty duty;
2814
2815 if (!clk)
2816 return 0;
2817
2818 /* sanity check the ratio */
2819 if (den == 0 || num > den)
2820 return -EINVAL;
2821
2822 duty.num = num;
2823 duty.den = den;
2824
2825 clk_prepare_lock();
2826
2827 if (clk->exclusive_count)
2828 clk_core_rate_unprotect(clk->core);
2829
2830 ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
2831
2832 if (clk->exclusive_count)
2833 clk_core_rate_protect(clk->core);
2834
2835 clk_prepare_unlock();
2836
2837 return ret;
2838 }
2839 EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
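
/*
 * Illustrative sketch (not part of this file): requesting a 1/3 duty
 * cycle on a hypothetical clock handle. Per the sanity check above,
 * den must be non-zero and num must not exceed den.
 *
 *	ret = clk_set_duty_cycle(pwm_clk, 1, 3);
 */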
2840
2841 static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
2842 unsigned int scale)
2843 {
2844 struct clk_duty *duty = &core->duty;
2845 int ret;
2846
2847 clk_prepare_lock();
2848
2849 ret = clk_core_update_duty_cycle_nolock(core);
2850 if (!ret)
2851 ret = mult_frac(scale, duty->num, duty->den);
2852
2853 clk_prepare_unlock();
2854
2855 return ret;
2856 }
2857
2858 /**
2859 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
2860 * @clk: clock signal source
2861 * @scale: scaling factor to be applied to represent the ratio as an integer
2862 *
2863 * Returns the duty cycle ratio of a clock node multiplied by the provided
2864 * scaling factor, or negative errno on error.
2865 */
2866 int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
2867 {
2868 if (!clk)
2869 return 0;
2870
2871 return clk_core_get_scaled_duty_cycle(clk->core, scale);
2872 }
2873 EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
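
/*
 * Illustrative sketch (not part of this file): reading the ratio back
 * as an integer. With scale = 100 the result is a percentage, so a
 * 1/3 duty cycle reads back as 33; the debugfs code below passes
 * 100000 for finer resolution.
 *
 *	int pct = clk_get_scaled_duty_cycle(pwm_clk, 100);
 */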
2874
2875 /**
2876 * clk_is_match - check if two clk's point to the same hardware clock
2877 * @p: clk compared against q
2878 * @q: clk compared against p
2879 *
2880 * Returns true if the two struct clk pointers both point to the same hardware
2881 * clock node. Put differently, returns true if struct clk *p and struct clk *q
2882 * share the same struct clk_core object.
2883 *
2884 * Returns false otherwise. Note that two NULL clks are treated as matching.
2885 */
2886 bool clk_is_match(const struct clk *p, const struct clk *q)
2887 {
2888 /* trivial case: identical struct clk's or both NULL */
2889 if (p == q)
2890 return true;
2891
2892 /* true if clk->core pointers match. Avoid dereferencing garbage */
2893 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
2894 if (p->core == q->core)
2895 return true;
2896
2897 return false;
2898 }
2899 EXPORT_SYMBOL_GPL(clk_is_match);
2900
2901 /*** debugfs support ***/
2902
2903 #ifdef CONFIG_DEBUG_FS
2904 #include <linux/debugfs.h>
2905
2906 static struct dentry *rootdir;
2907 static int inited = 0;
2908 static DEFINE_MUTEX(clk_debug_lock);
2909 static HLIST_HEAD(clk_debug_list);
2910
2911 static struct hlist_head *orphan_list[] = {
2912 &clk_orphan_list,
2913 NULL,
2914 };
2915
2916 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
2917 int level)
2918 {
2919 int phase;
2920
2921 seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
2922 level * 3 + 1, "",
2923 30 - level * 3, c->name,
2924 c->enable_count, c->prepare_count, c->protect_count,
2925 clk_core_get_rate_recalc(c),
2926 clk_core_get_accuracy_recalc(c));
2927
2928 phase = clk_core_get_phase(c);
2929 if (phase >= 0)
2930 seq_printf(s, "%5d", phase);
2931 else
2932 seq_puts(s, "-----");
2933
2934 seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000));
2935 }
2936
2937 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
2938 int level)
2939 {
2940 struct clk_core *child;
2941
2942 clk_summary_show_one(s, c, level);
2943
2944 hlist_for_each_entry(child, &c->children, child_node)
2945 clk_summary_show_subtree(s, child, level + 1);
2946 }
2947
2948 static int clk_summary_show(struct seq_file *s, void *data)
2949 {
2950 struct clk_core *c;
2951 struct hlist_head **lists = (struct hlist_head **)s->private;
2952
2953 seq_puts(s, " enable prepare protect duty\n");
2954 seq_puts(s, " clock count count count rate accuracy phase cycle\n");
2955 seq_puts(s, "---------------------------------------------------------------------------------------------\n");
2956
2957 clk_prepare_lock();
2958
2959 for (; *lists; lists++)
2960 hlist_for_each_entry(c, *lists, child_node)
2961 clk_summary_show_subtree(s, c, 0);
2962
2963 clk_prepare_unlock();
2964
2965 return 0;
2966 }
2967 DEFINE_SHOW_ATTRIBUTE(clk_summary);
2968
2969 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
2970 {
2971 int phase;
2972 unsigned long min_rate, max_rate;
2973
2974 clk_core_get_boundaries(c, &min_rate, &max_rate);
2975
2976 /* This should be JSON format, i.e. elements separated with a comma */
2977 seq_printf(s, "\"%s\": { ", c->name);
2978 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
2979 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
2980 seq_printf(s, "\"protect_count\": %d,", c->protect_count);
2981 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c));
2982 seq_printf(s, "\"min_rate\": %lu,", min_rate);
2983 seq_printf(s, "\"max_rate\": %lu,", max_rate);
2984 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c));
2985 phase = clk_core_get_phase(c);
2986 if (phase >= 0)
2987 seq_printf(s, "\"phase\": %d,", phase);
2988 seq_printf(s, "\"duty_cycle\": %u",
2989 clk_core_get_scaled_duty_cycle(c, 100000));
2990 }
2991
2992 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
2993 {
2994 struct clk_core *child;
2995
2996 clk_dump_one(s, c, level);
2997
2998 hlist_for_each_entry(child, &c->children, child_node) {
2999 seq_putc(s, ',');
3000 clk_dump_subtree(s, child, level + 1);
3001 }
3002
3003 seq_putc(s, '}');
3004 }
3005
3006 static int clk_dump_show(struct seq_file *s, void *data)
3007 {
3008 struct clk_core *c;
3009 bool first_node = true;
3010 struct hlist_head **lists = (struct hlist_head **)s->private;
3011
3012 seq_putc(s, '{');
3013 clk_prepare_lock();
3014
3015 for (; *lists; lists++) {
3016 hlist_for_each_entry(c, *lists, child_node) {
3017 if (!first_node)
3018 seq_putc(s, ',');
3019 first_node = false;
3020 clk_dump_subtree(s, c, 0);
3021 }
3022 }
3023
3024 clk_prepare_unlock();
3025
3026 seq_puts(s, "}\n");
3027 return 0;
3028 }
3029 DEFINE_SHOW_ATTRIBUTE(clk_dump);
3030
3031 #undef CLOCK_ALLOW_WRITE_DEBUGFS
3032 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3033 /*
3034 * This can be dangerous, therefore don't provide any real compile time
3035 * configuration option for this feature.
3036 * People who want to use this will need to modify the source code directly.
3037 */
3038 static int clk_rate_set(void *data, u64 val)
3039 {
3040 struct clk_core *core = data;
3041 int ret;
3042
3043 clk_prepare_lock();
3044 ret = clk_core_set_rate_nolock(core, val);
3045 clk_prepare_unlock();
3046
3047 return ret;
3048 }
3049
3050 #define clk_rate_mode 0644
3051
3052 static int clk_prepare_enable_set(void *data, u64 val)
3053 {
3054 struct clk_core *core = data;
3055 int ret = 0;
3056
3057 if (val)
3058 ret = clk_prepare_enable(core->hw->clk);
3059 else
3060 clk_disable_unprepare(core->hw->clk);
3061
3062 return ret;
3063 }
3064
3065 static int clk_prepare_enable_get(void *data, u64 *val)
3066 {
3067 struct clk_core *core = data;
3068
3069 *val = core->enable_count && core->prepare_count;
3070 return 0;
3071 }
3072
3073 DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get,
3074 clk_prepare_enable_set, "%llu\n");
3075
3076 #else
3077 #define clk_rate_set NULL
3078 #define clk_rate_mode 0444
3079 #endif
3080
3081 static int clk_rate_get(void *data, u64 *val)
3082 {
3083 struct clk_core *core = data;
3084
3085 *val = core->rate;
3086 return 0;
3087 }
3088
3089 DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");
3090
3091 static const struct {
3092 unsigned long flag;
3093 const char *name;
3094 } clk_flags[] = {
3095 #define ENTRY(f) { f, #f }
3096 ENTRY(CLK_SET_RATE_GATE),
3097 ENTRY(CLK_SET_PARENT_GATE),
3098 ENTRY(CLK_SET_RATE_PARENT),
3099 ENTRY(CLK_IGNORE_UNUSED),
3100 ENTRY(CLK_GET_RATE_NOCACHE),
3101 ENTRY(CLK_SET_RATE_NO_REPARENT),
3102 ENTRY(CLK_GET_ACCURACY_NOCACHE),
3103 ENTRY(CLK_RECALC_NEW_RATES),
3104 ENTRY(CLK_SET_RATE_UNGATE),
3105 ENTRY(CLK_IS_CRITICAL),
3106 ENTRY(CLK_OPS_PARENT_ENABLE),
3107 ENTRY(CLK_DUTY_CYCLE_PARENT),
3108 #undef ENTRY
3109 };
3110
3111 static int clk_flags_show(struct seq_file *s, void *data)
3112 {
3113 struct clk_core *core = s->private;
3114 unsigned long flags = core->flags;
3115 unsigned int i;
3116
3117 for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
3118 if (flags & clk_flags[i].flag) {
3119 seq_printf(s, "%s\n", clk_flags[i].name);
3120 flags &= ~clk_flags[i].flag;
3121 }
3122 }
3123 if (flags) {
3124 /* Unknown flags */
3125 seq_printf(s, "0x%lx\n", flags);
3126 }
3127
3128 return 0;
3129 }
3130 DEFINE_SHOW_ATTRIBUTE(clk_flags);
3131
3132 static void possible_parent_show(struct seq_file *s, struct clk_core *core,
3133 unsigned int i, char terminator)
3134 {
3135 struct clk_core *parent;
3136
3137 /*
3138 * Go through the following options to fetch a parent's name.
3139 *
3140 * 1. Fetch the registered parent clock and use its name
3141 * 2. Use the global (fallback) name if specified
3142 * 3. Use the local fw_name if provided
3143 * 4. Fetch parent clock's clock-output-name if DT index was set
3144 *
3145 * This may still fail in some cases, such as when the parent is
3146 * specified directly via a struct clk_hw pointer, but it isn't
3147 * registered (yet).
3148 */
3149 parent = clk_core_get_parent_by_index(core, i);
3150 if (parent)
3151 seq_puts(s, parent->name);
3152 else if (core->parents[i].name)
3153 seq_puts(s, core->parents[i].name);
3154 else if (core->parents[i].fw_name)
3155 seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
3156 else if (core->parents[i].index >= 0)
3157 seq_puts(s,
3158 of_clk_get_parent_name(core->of_node,
3159 core->parents[i].index));
3160 else
3161 seq_puts(s, "(missing)");
3162
3163 seq_putc(s, terminator);
3164 }
3165
3166 static int possible_parents_show(struct seq_file *s, void *data)
3167 {
3168 struct clk_core *core = s->private;
3169 int i;
3170
3171 for (i = 0; i < core->num_parents - 1; i++)
3172 possible_parent_show(s, core, i, ' ');
3173
3174 possible_parent_show(s, core, i, '\n');
3175
3176 return 0;
3177 }
3178 DEFINE_SHOW_ATTRIBUTE(possible_parents);
3179
3180 static int current_parent_show(struct seq_file *s, void *data)
3181 {
3182 struct clk_core *core = s->private;
3183
3184 if (core->parent)
3185 seq_printf(s, "%s\n", core->parent->name);
3186
3187 return 0;
3188 }
3189 DEFINE_SHOW_ATTRIBUTE(current_parent);
3190
3191 static int clk_duty_cycle_show(struct seq_file *s, void *data)
3192 {
3193 struct clk_core *core = s->private;
3194 struct clk_duty *duty = &core->duty;
3195
3196 seq_printf(s, "%u/%u\n", duty->num, duty->den);
3197
3198 return 0;
3199 }
3200 DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
3201
3202 static int clk_min_rate_show(struct seq_file *s, void *data)
3203 {
3204 struct clk_core *core = s->private;
3205 unsigned long min_rate, max_rate;
3206
3207 clk_prepare_lock();
3208 clk_core_get_boundaries(core, &min_rate, &max_rate);
3209 clk_prepare_unlock();
3210 seq_printf(s, "%lu\n", min_rate);
3211
3212 return 0;
3213 }
3214 DEFINE_SHOW_ATTRIBUTE(clk_min_rate);
3215
3216 static int clk_max_rate_show(struct seq_file *s, void *data)
3217 {
3218 struct clk_core *core = s->private;
3219 unsigned long min_rate, max_rate;
3220
3221 clk_prepare_lock();
3222 clk_core_get_boundaries(core, &min_rate, &max_rate);
3223 clk_prepare_unlock();
3224 seq_printf(s, "%lu\n", max_rate);
3225
3226 return 0;
3227 }
3228 DEFINE_SHOW_ATTRIBUTE(clk_max_rate);
3229
3230 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
3231 {
3232 struct dentry *root;
3233
3234 if (!core || !pdentry)
3235 return;
3236
3237 root = debugfs_create_dir(core->name, pdentry);
3238 core->dentry = root;
3239
3240 debugfs_create_file("clk_rate", clk_rate_mode, root, core,
3241 &clk_rate_fops);
3242 debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
3243 debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
3244 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
3245 debugfs_create_u32("clk_phase", 0444, root, &core->phase);
3246 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
3247 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
3248 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
3249 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
3250 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
3251 debugfs_create_file("clk_duty_cycle", 0444, root, core,
3252 &clk_duty_cycle_fops);
3253 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3254 debugfs_create_file("clk_prepare_enable", 0644, root, core,
3255 &clk_prepare_enable_fops);
3256 #endif
3257
3258 if (core->num_parents > 0)
3259 debugfs_create_file("clk_parent", 0444, root, core,
3260 &current_parent_fops);
3261
3262 if (core->num_parents > 1)
3263 debugfs_create_file("clk_possible_parents", 0444, root, core,
3264 &possible_parents_fops);
3265
3266 if (core->ops->debug_init)
3267 core->ops->debug_init(core->hw, core->dentry);
3268 }
3269
3270 /**
3271 * clk_debug_register - add a clk node to the debugfs clk directory
3272 * @core: the clk being added to the debugfs clk directory
3273 *
3274 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
3275 * initialized. Otherwise it bails out early since the debugfs clk directory
3276 * will be created lazily by clk_debug_init as part of a late_initcall.
3277 */
3278 static void clk_debug_register(struct clk_core *core)
3279 {
3280 mutex_lock(&clk_debug_lock);
3281 hlist_add_head(&core->debug_node, &clk_debug_list);
3282 if (inited)
3283 clk_debug_create_one(core, rootdir);
3284 mutex_unlock(&clk_debug_lock);
3285 }
3286
3287 /**
3288 * clk_debug_unregister - remove a clk node from the debugfs clk directory
3289 * @core: the clk being removed from the debugfs clk directory
3290 *
3291 * Dynamically removes a clk and all its child nodes from the
3292 * debugfs clk directory if clk->dentry points to debugfs created by
3293 * clk_debug_register in __clk_core_init.
3294 */
3295 static void clk_debug_unregister(struct clk_core *core)
3296 {
3297 mutex_lock(&clk_debug_lock);
3298 hlist_del_init(&core->debug_node);
3299 debugfs_remove_recursive(core->dentry);
3300 core->dentry = NULL;
3301 mutex_unlock(&clk_debug_lock);
3302 }
3303
3304 /**
3305 * clk_debug_init - lazily populate the debugfs clk directory
3306 *
3307 * clks are often initialized very early during boot before memory can be
3308 * dynamically allocated and well before debugfs is setup. This function
3309 * populates the debugfs clk directory once at boot-time when we know that
3310 * debugfs is setup. It should only be called once at boot-time, all other clks
3311 * added dynamically will be done so with clk_debug_register.
3312 */
3313 static int __init clk_debug_init(void)
3314 {
3315 struct clk_core *core;
3316
3317 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3318 pr_warn("\n");
3319 pr_warn("********************************************************************\n");
3320 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3321 pr_warn("** **\n");
3322 pr_warn("** WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
3323 pr_warn("** **\n");
3324 pr_warn("** This means that this kernel is built to expose clk operations **\n");
3325 pr_warn("** such as parent or rate setting, enabling, disabling, etc. **\n");
3326 pr_warn("** to userspace, which may compromise security on your system. **\n");
3327 pr_warn("** **\n");
3328 pr_warn("** If you see this message and you are not debugging the **\n");
3329 pr_warn("** kernel, report this immediately to your vendor! **\n");
3330 pr_warn("** **\n");
3331 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3332 pr_warn("********************************************************************\n");
3333 #endif
3334
3335 rootdir = debugfs_create_dir("clk", NULL);
3336
3337 debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
3338 &clk_summary_fops);
3339 debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
3340 &clk_dump_fops);
3341 debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
3342 &clk_summary_fops);
3343 debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
3344 &clk_dump_fops);
3345
3346 mutex_lock(&clk_debug_lock);
3347 hlist_for_each_entry(core, &clk_debug_list, debug_node)
3348 clk_debug_create_one(core, rootdir);
3349
3350 inited = 1;
3351 mutex_unlock(&clk_debug_lock);
3352
3353 return 0;
3354 }
3355 late_initcall(clk_debug_init);
3356 #else
3357 static inline void clk_debug_register(struct clk_core *core) { }
3358 static inline void clk_debug_unregister(struct clk_core *core)
3359 {
3360 }
3361 #endif
3362
3363 static void clk_core_reparent_orphans_nolock(void)
3364 {
3365 struct clk_core *orphan;
3366 struct hlist_node *tmp2;
3367
3368 /*
3369 * Walk the list of orphan clocks and reparent any that have newly
3370 * found a parent.
3371 */
3372 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
3373 struct clk_core *parent = __clk_init_parent(orphan);
3374
3375 /*
3376 * We need to use __clk_set_parent_before() and _after() to
3377 * properly migrate any prepare/enable count of the orphan
3378 * clock. This is important for CLK_IS_CRITICAL clocks, which
3379 * are enabled during init but might not have a parent yet.
3380 */
3381 if (parent) {
3382 /* update the clk tree topology */
3383 __clk_set_parent_before(orphan, parent);
3384 __clk_set_parent_after(orphan, parent, NULL);
3385 __clk_recalc_accuracies(orphan);
3386 __clk_recalc_rates(orphan, 0);
3387 }
3388 }
3389 }
3390
3391 /**
3392 * __clk_core_init - initialize the data structures in a struct clk_core
3393 * @core: clk_core being initialized
3394 *
3395 * Initializes the lists in struct clk_core, queries the hardware for the
3396 * parent and rate and sets them both.
3397 */
3398 static int __clk_core_init(struct clk_core *core)
3399 {
3400 int ret;
3401 struct clk_core *parent;
3402 unsigned long rate;
3403 int phase;
3404
3405 if (!core)
3406 return -EINVAL;
3407
3408 clk_prepare_lock();
3409
3410 /*
3411 * Set hw->core after grabbing the prepare_lock to synchronize with
3412 * callers of clk_core_fill_parent_index() where we treat hw->core
3413 * being NULL as the clk not being registered yet. This is crucial so
3414 * that clks aren't parented until their parent is fully registered.
3415 */
3416 core->hw->core = core;
3417
3418 ret = clk_pm_runtime_get(core);
3419 if (ret)
3420 goto unlock;
3421
3422 /* check to see if a clock with this name is already registered */
3423 if (clk_core_lookup(core->name)) {
3424 pr_debug("%s: clk %s already initialized\n",
3425 __func__, core->name);
3426 ret = -EEXIST;
3427 goto out;
3428 }
3429
3430 /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */
3431 if (core->ops->set_rate &&
3432 !((core->ops->round_rate || core->ops->determine_rate) &&
3433 core->ops->recalc_rate)) {
3434 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
3435 __func__, core->name);
3436 ret = -EINVAL;
3437 goto out;
3438 }
3439
3440 if (core->ops->set_parent && !core->ops->get_parent) {
3441 pr_err("%s: %s must implement .get_parent & .set_parent\n",
3442 __func__, core->name);
3443 ret = -EINVAL;
3444 goto out;
3445 }
3446
3447 if (core->num_parents > 1 && !core->ops->get_parent) {
3448 pr_err("%s: %s must implement .get_parent as it has multi parents\n",
3449 __func__, core->name);
3450 ret = -EINVAL;
3451 goto out;
3452 }
3453
3454 if (core->ops->set_rate_and_parent &&
3455 !(core->ops->set_parent && core->ops->set_rate)) {
3456 pr_err("%s: %s must implement .set_parent & .set_rate\n",
3457 __func__, core->name);
3458 ret = -EINVAL;
3459 goto out;
3460 }
3461
3462 /*
3463 * optional platform-specific magic
3464 *
3465 * The .init callback is not used by any of the basic clock types, but
3466 * exists for weird hardware that must perform initialization magic for
3467 * CCF to get an accurate view of the clock for any other callbacks. It
3468 * may also be used when a clock needs to perform dynamic allocations.
3469 * Such allocations must be freed in the terminate() callback.
3470 * This callback shall not be used to initialize the parameters state,
3471 * such as rate, parent, etc ...
3472 *
3473 * If it exists, this callback shall be called before any other callback
3474 * of the clock.
3475 */
3476 if (core->ops->init) {
3477 ret = core->ops->init(core->hw);
3478 if (ret)
3479 goto out;
3480 }
3481
3482 parent = core->parent = __clk_init_parent(core);
3483
3484 /*
3485 * Populate core->parent if parent has already been clk_core_init'd. If
3486 * parent has not yet been clk_core_init'd then place clk in the orphan
3487 * list. If clk doesn't have any parents then place it in the root
3488 * clk list.
3489 *
3490 * Every time a new clk is clk_init'd then we walk the list of orphan
3491 * clocks and re-parent any that are children of the clock currently
3492 * being clk_init'd.
3493 */
3494 if (parent) {
3495 hlist_add_head(&core->child_node, &parent->children);
3496 core->orphan = parent->orphan;
3497 } else if (!core->num_parents) {
3498 hlist_add_head(&core->child_node, &clk_root_list);
3499 core->orphan = false;
3500 } else {
3501 hlist_add_head(&core->child_node, &clk_orphan_list);
3502 core->orphan = true;
3503 }
3504
3505 /*
3506 * Set clk's accuracy. The preferred method is to use
3507 * .recalc_accuracy. For simple clocks and lazy developers the default
3508 * fallback is to use the parent's accuracy. If a clock doesn't have a
3509 * parent (or is orphaned) then accuracy is set to zero (perfect
3510 * clock).
3511 */
3512 if (core->ops->recalc_accuracy)
3513 core->accuracy = core->ops->recalc_accuracy(core->hw,
3514 clk_core_get_accuracy_no_lock(parent));
3515 else if (parent)
3516 core->accuracy = parent->accuracy;
3517 else
3518 core->accuracy = 0;
3519
3520 /*
3521 * Set clk's phase by clk_core_get_phase() caching the phase.
3522 * Since a phase is by definition relative to its parent, just
3523 * query the current clock phase, or just assume it's in phase.
3524 */
3525 phase = clk_core_get_phase(core);
3526 if (phase < 0) {
3527 ret = phase;
3528 pr_warn("%s: Failed to get phase for clk '%s'\n", __func__,
3529 core->name);
3530 goto out;
3531 }
3532
3533 /*
3534 * Set clk's duty cycle.
3535 */
3536 clk_core_update_duty_cycle_nolock(core);
3537
3538 /*
3539 * Set clk's rate. The preferred method is to use .recalc_rate. For
3540 * simple clocks and lazy developers the default fallback is to use the
3541 * parent's rate. If a clock doesn't have a parent (or is orphaned)
3542 * then rate is set to zero.
3543 */
3544 if (core->ops->recalc_rate)
3545 rate = core->ops->recalc_rate(core->hw,
3546 clk_core_get_rate_nolock(parent));
3547 else if (parent)
3548 rate = parent->rate;
3549 else
3550 rate = 0;
3551 core->rate = core->req_rate = rate;
3552
3553 /*
3554 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
3555 * don't get accidentally disabled when walking the orphan tree and
3556 * reparenting clocks
3557 */
3558 if (core->flags & CLK_IS_CRITICAL) {
3559 unsigned long flags;
3560
3561 ret = clk_core_prepare(core);
3562 if (ret) {
3563 pr_warn("%s: critical clk '%s' failed to prepare\n",
3564 __func__, core->name);
3565 goto out;
3566 }
3567
3568 flags = clk_enable_lock();
3569 ret = clk_core_enable(core);
3570 clk_enable_unlock(flags);
3571 if (ret) {
3572 pr_warn("%s: critical clk '%s' failed to enable\n",
3573 __func__, core->name);
3574 clk_core_unprepare(core);
3575 goto out;
3576 }
3577 }
3578
3579 clk_core_reparent_orphans_nolock();
3580
3581
3582 kref_init(&core->ref);
3583 out:
3584 clk_pm_runtime_put(core);
3585 unlock:
3586 if (ret) {
3587 hlist_del_init(&core->child_node);
3588 core->hw->core = NULL;
3589 }
3590
3591 clk_prepare_unlock();
3592
3593 if (!ret)
3594 clk_debug_register(core);
3595
3596 return ret;
3597 }
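
/*
 * Illustrative sketch (not part of this file): a minimal clk_ops that
 * passes the sanity checks in __clk_core_init() above, since a clock
 * with .set_rate must also provide .recalc_rate and one of .round_rate
 * or .determine_rate. The fixed divide-by-two behaviour and the "foo"
 * names are made up.
 *
 *	static unsigned long foo_recalc_rate(struct clk_hw *hw,
 *					     unsigned long parent_rate)
 *	{
 *		return parent_rate / 2;
 *	}
 *
 *	static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
 *				   unsigned long *parent_rate)
 *	{
 *		return *parent_rate / 2;
 *	}
 *
 *	static int foo_set_rate(struct clk_hw *hw, unsigned long rate,
 *				unsigned long parent_rate)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct clk_ops foo_ops = {
 *		.recalc_rate = foo_recalc_rate,
 *		.round_rate = foo_round_rate,
 *		.set_rate = foo_set_rate,
 *	};
 */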
3598
3599 /**
3600 * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
3601 * @core: clk to add consumer to
3602 * @clk: consumer to link to a clk
3603 */
3604 static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
3605 {
3606 clk_prepare_lock();
3607 hlist_add_head(&clk->clks_node, &core->clks);
3608 clk_prepare_unlock();
3609 }
3610
3611 /**
3612 * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
3613 * @clk: consumer to unlink
3614 */
3615 static void clk_core_unlink_consumer(struct clk *clk)
3616 {
3617 lockdep_assert_held(&prepare_lock);
3618 hlist_del(&clk->clks_node);
3619 }
3620
3621 /**
3622 * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core
3623 * @core: clk to allocate a consumer for
3624 * @dev_id: string describing device name
3625 * @con_id: connection ID string on device
3626 *
3627 * Returns: clk consumer left unlinked from the consumer list
3628 */
3629 static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
3630 const char *con_id)
3631 {
3632 struct clk *clk;
3633
3634 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
3635 if (!clk)
3636 return ERR_PTR(-ENOMEM);
3637
3638 clk->core = core;
3639 clk->dev_id = dev_id;
3640 clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
3641 clk->max_rate = ULONG_MAX;
3642
3643 return clk;
3644 }
3645
3646 /**
3647 * free_clk - Free a clk consumer
3648 * @clk: clk consumer to free
3649 *
3650 * Note, this assumes the clk has been unlinked from the clk_core consumer
3651 * list.
3652 */
3653 static void free_clk(struct clk *clk)
3654 {
3655 kfree_const(clk->con_id);
3656 kfree(clk);
3657 }
3658
3659 /**
3660 * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given
3661 * a clk_hw
3662 * @dev: clk consumer device
3663 * @hw: clk_hw associated with the clk being consumed
3664 * @dev_id: string describing device name
3665 * @con_id: connection ID string on device
3666 *
3667 * This is the main function used to create a clk pointer for use by clk
3668 * consumers. It connects a consumer to the clk_core and clk_hw structures
3669 * used by the framework and clk provider respectively.
3670 */
3671 struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
3672 const char *dev_id, const char *con_id)
3673 {
3674 struct clk *clk;
3675 struct clk_core *core;
3676
3677 /* This is to allow this function to be chained to others */
3678 if (IS_ERR_OR_NULL(hw))
3679 return ERR_CAST(hw);
3680
3681 core = hw->core;
3682 clk = alloc_clk(core, dev_id, con_id);
3683 if (IS_ERR(clk))
3684 return clk;
3685 clk->dev = dev;
3686
3687 if (!try_module_get(core->owner)) {
3688 free_clk(clk);
3689 return ERR_PTR(-ENOENT);
3690 }
3691
3692 kref_get(&core->ref);
3693 clk_core_link_consumer(core, clk);
3694
3695 return clk;
3696 }
3697
3698 static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
3699 {
3700 const char *dst;
3701
3702 if (!src) {
3703 if (must_exist)
3704 return -EINVAL;
3705 return 0;
3706 }
3707
3708 *dst_p = dst = kstrdup_const(src, GFP_KERNEL);
3709 if (!dst)
3710 return -ENOMEM;
3711
3712 return 0;
3713 }
3714
clk_core_populate_parent_map(struct clk_core * core,const struct clk_init_data * init)3715 static int clk_core_populate_parent_map(struct clk_core *core,
3716 const struct clk_init_data *init)
3717 {
3718 u8 num_parents = init->num_parents;
3719 const char * const *parent_names = init->parent_names;
3720 const struct clk_hw **parent_hws = init->parent_hws;
3721 const struct clk_parent_data *parent_data = init->parent_data;
3722 int i, ret = 0;
3723 struct clk_parent_map *parents, *parent;
3724
3725 if (!num_parents)
3726 return 0;
3727
3728 /*
3729 * Avoid unnecessary string look-ups of clk_core's possible parents by
3730 * having a cache of names/clk_hw pointers to clk_core pointers.
3731 */
3732 parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
3733 core->parents = parents;
3734 if (!parents)
3735 return -ENOMEM;
3736
3737 /* Copy everything over because it might be __initdata */
3738 for (i = 0, parent = parents; i < num_parents; i++, parent++) {
3739 parent->index = -1;
3740 if (parent_names) {
3741 /* throw a WARN if any entries are NULL */
3742 WARN(!parent_names[i],
3743 "%s: invalid NULL in %s's .parent_names\n",
3744 __func__, core->name);
3745 ret = clk_cpy_name(&parent->name, parent_names[i],
3746 true);
3747 } else if (parent_data) {
3748 parent->hw = parent_data[i].hw;
3749 parent->index = parent_data[i].index;
3750 ret = clk_cpy_name(&parent->fw_name,
3751 parent_data[i].fw_name, false);
3752 if (!ret)
3753 ret = clk_cpy_name(&parent->name,
3754 parent_data[i].name,
3755 false);
3756 } else if (parent_hws) {
3757 parent->hw = parent_hws[i];
3758 } else {
3759 ret = -EINVAL;
3760 WARN(1, "Must specify parents if num_parents > 0\n");
3761 }
3762
3763 if (ret) {
3764 do {
3765 kfree_const(parents[i].name);
3766 kfree_const(parents[i].fw_name);
3767 } while (--i >= 0);
3768 kfree(parents);
3769
3770 return ret;
3771 }
3772 }
3773
3774 return 0;
3775 }
3776
clk_core_free_parent_map(struct clk_core * core)3777 static void clk_core_free_parent_map(struct clk_core *core)
3778 {
3779 int i = core->num_parents;
3780
3781 if (!core->num_parents)
3782 return;
3783
3784 while (--i >= 0) {
3785 kfree_const(core->parents[i].name);
3786 kfree_const(core->parents[i].fw_name);
3787 }
3788
3789 kfree(core->parents);
3790 }
3791
3792 static struct clk *
__clk_register(struct device * dev,struct device_node * np,struct clk_hw * hw)3793 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
3794 {
3795 int ret;
3796 struct clk_core *core;
3797 const struct clk_init_data *init = hw->init;
3798
3799 /*
3800 * The init data is not supposed to be used outside of registration path.
3801 * Set it to NULL so that provider drivers can't use it either and so that
3802 * we catch use of hw->init early on in the core.
3803 */
3804 hw->init = NULL;
3805
3806 core = kzalloc(sizeof(*core), GFP_KERNEL);
3807 if (!core) {
3808 ret = -ENOMEM;
3809 goto fail_out;
3810 }
3811
3812 core->name = kstrdup_const(init->name, GFP_KERNEL);
3813 if (!core->name) {
3814 ret = -ENOMEM;
3815 goto fail_name;
3816 }
3817
3818 if (WARN_ON(!init->ops)) {
3819 ret = -EINVAL;
3820 goto fail_ops;
3821 }
3822 core->ops = init->ops;
3823
3824 if (dev && pm_runtime_enabled(dev))
3825 core->rpm_enabled = true;
3826 core->dev = dev;
3827 core->of_node = np;
3828 if (dev && dev->driver)
3829 core->owner = dev->driver->owner;
3830 core->hw = hw;
3831 core->flags = init->flags;
3832 core->num_parents = init->num_parents;
3833 core->min_rate = 0;
3834 core->max_rate = ULONG_MAX;
3835
3836 ret = clk_core_populate_parent_map(core, init);
3837 if (ret)
3838 goto fail_parents;
3839
3840 INIT_HLIST_HEAD(&core->clks);
3841
3842 /*
3843 * Don't call clk_hw_create_clk() here because that would pin the
3844 * provider module to itself and prevent it from ever being removed.
3845 */
3846 hw->clk = alloc_clk(core, NULL, NULL);
3847 if (IS_ERR(hw->clk)) {
3848 ret = PTR_ERR(hw->clk);
3849 goto fail_create_clk;
3850 }
3851
3852 clk_core_link_consumer(core, hw->clk);
3853
3854 ret = __clk_core_init(core);
3855 if (!ret)
3856 return hw->clk;
3857
3858 clk_prepare_lock();
3859 clk_core_unlink_consumer(hw->clk);
3860 clk_prepare_unlock();
3861
3862 free_clk(hw->clk);
3863 hw->clk = NULL;
3864
3865 fail_create_clk:
3866 clk_core_free_parent_map(core);
3867 fail_parents:
3868 fail_ops:
3869 kfree_const(core->name);
3870 fail_name:
3871 kfree(core);
3872 fail_out:
3873 return ERR_PTR(ret);
3874 }

/**
 * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
 * @dev: Device to get device node of
 *
 * Return: device node pointer of @dev, or the device node pointer of
 * @dev->parent if @dev doesn't have a device node, or NULL if neither
 * @dev nor @dev->parent have a device node.
 */
static struct device_node *dev_or_parent_of_node(struct device *dev)
{
	struct device_node *np;

	if (!dev)
		return NULL;

	np = dev_of_node(dev);
	if (!np)
		np = dev_of_node(dev->parent);

	return np;
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the *deprecated* interface for populating the clock tree with
 * new clock nodes. Use clk_hw_register() instead.
 *
 * Returns: a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API. In the event of an error clk_register will return an
 * error code; drivers must test for an error code after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	return __clk_register(dev, dev_or_parent_of_node(dev), hw);
}
EXPORT_SYMBOL_GPL(clk_register);

/**
 * clk_hw_register - register a clk_hw and return an error code
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_hw_register is the primary interface for populating the clock tree with
 * new clock nodes. It returns an integer equal to zero indicating success or
 * less than zero indicating failure. Drivers must test for an error code after
 * calling clk_hw_register().
 */
int clk_hw_register(struct device *dev, struct clk_hw *hw)
{
	return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
			       hw));
}
EXPORT_SYMBOL_GPL(clk_hw_register);
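
/*
 * Example (illustrative sketch, not part of the upstream file): a typical
 * provider allocates a struct clk_hw, points hw->init at a struct
 * clk_init_data and calls clk_hw_register() from probe. The driver name,
 * ops and clock name below are hypothetical; error handling is trimmed.
 *
 *	static int foo_clk_probe(struct platform_device *pdev)
 *	{
 *		struct clk_hw *hw;
 *		struct clk_init_data init = { };
 *
 *		hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
 *		if (!hw)
 *			return -ENOMEM;
 *
 *		init.name = "foo_gate";
 *		init.ops = &foo_gate_ops;	// hypothetical clk_ops
 *		init.num_parents = 0;
 *		hw->init = &init;
 *
 *		return clk_hw_register(&pdev->dev, hw);
 *	}
 *
 * Note that &init may live on the stack: __clk_register() copies everything
 * it needs and clears hw->init before returning.
 */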

/**
 * of_clk_hw_register - register a clk_hw and return an error code
 * @node: device_node of device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * of_clk_hw_register() is the primary interface for populating the clock tree
 * with new clock nodes when a struct device is not available, but a struct
 * device_node is. It returns an integer equal to zero indicating success or
 * less than zero indicating failure. Drivers must test for an error code after
 * calling of_clk_hw_register().
 */
int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
{
	return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);

/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
	struct clk_core *core = container_of(ref, struct clk_core, ref);

	lockdep_assert_held(&prepare_lock);

	clk_core_free_parent_map(core);
	kfree_const(core->name);
	kfree(core);
}

/*
 * Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable		= clk_nodrv_prepare_enable,
	.disable	= clk_nodrv_disable_unprepare,
	.prepare	= clk_nodrv_prepare_enable,
	.unprepare	= clk_nodrv_disable_unprepare,
	.set_rate	= clk_nodrv_set_rate,
	.set_parent	= clk_nodrv_set_parent,
};

static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
						struct clk_core *target)
{
	int i;
	struct clk_core *child;

	for (i = 0; i < root->num_parents; i++)
		if (root->parents[i].core == target)
			root->parents[i].core = NULL;

	hlist_for_each_entry(child, &root->children, child_node)
		clk_core_evict_parent_cache_subtree(child, target);
}

/* Remove this clk from all parent caches */
static void clk_core_evict_parent_cache(struct clk_core *core)
{
	struct hlist_head **lists;
	struct clk_core *root;

	lockdep_assert_held(&prepare_lock);

	for (lists = all_lists; *lists; lists++)
		hlist_for_each_entry(root, *lists, child_node)
			clk_core_evict_parent_cache_subtree(root, core);

}

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
void clk_unregister(struct clk *clk)
{
	unsigned long flags;
	const struct clk_ops *ops;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_debug_unregister(clk->core);

	clk_prepare_lock();

	ops = clk->core->ops;
	if (ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__,
		       clk->core->name);
		goto unlock;
	}
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->core->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	if (ops->terminate)
		ops->terminate(clk->core->hw);

	if (!hlist_empty(&clk->core->children)) {
		struct clk_core *child;
		struct hlist_node *t;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry_safe(child, t, &clk->core->children,
					  child_node)
			clk_core_set_parent_nolock(child, NULL);
	}

	clk_core_evict_parent_cache(clk->core);

	hlist_del_init(&clk->core->child_node);

	if (clk->core->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
			__func__, clk->core->name);

	if (clk->core->protect_count)
		pr_warn("%s: unregistering protected clock: %s\n",
			__func__, clk->core->name);

	kref_put(&clk->core->ref, __clk_release);
	free_clk(clk);
unlock:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);

/**
 * clk_hw_unregister - unregister a currently registered clk_hw
 * @hw: hardware-specific clock data to unregister
 */
void clk_hw_unregister(struct clk_hw *hw)
{
	clk_unregister(hw->clk);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister);

static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(*(struct clk **)res);
}

static void devm_clk_hw_release(struct device *dev, void *res)
{
	clk_hw_unregister(*(struct clk_hw **)res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register(). This function is *deprecated*, use
 * devm_clk_hw_register() instead.
 *
 * Clocks returned from this function are automatically clk_unregister()ed on
 * driver detach. See clk_register() for more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	struct clk **clkp;

	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
	if (!clkp)
		return ERR_PTR(-ENOMEM);

	clk = clk_register(dev, hw);
	if (!IS_ERR(clk)) {
		*clkp = clk;
		devres_add(dev, clkp);
	} else {
		devres_free(clkp);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);

/**
 * devm_clk_hw_register - resource managed clk_hw_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_hw_register(). Clocks registered by this function are
 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
 * for more information.
 */
int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
{
	struct clk_hw **hwp;
	int ret;

	hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
	if (!hwp)
		return -ENOMEM;

	ret = clk_hw_register(dev, hw);
	if (!ret) {
		*hwp = hw;
		devres_add(dev, hwp);
	} else {
		devres_free(hwp);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register);
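
/*
 * Example (illustrative sketch): with the managed variant, the probe path
 * from the clk_hw_register() example above shrinks to a single call and
 * needs no explicit unregistration in the driver's remove path:
 *
 *	hw->init = &init;
 *	return devm_clk_hw_register(&pdev->dev, hw);
 *
 * The clk_hw is then clk_hw_unregister()ed automatically when the device
 * is unbound.
 */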

static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk *c = res;
	if (WARN_ON(!c))
		return 0;
	return c == data;
}

static int devm_clk_hw_match(struct device *dev, void *res, void *data)
{
	struct clk_hw *hw = res;

	if (WARN_ON(!hw))
		return 0;
	return hw == data;
}

/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that is unregistering the clock data
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);

/**
 * devm_clk_hw_unregister - resource managed clk_hw_unregister()
 * @dev: device that is unregistering the hardware-specific clock data
 * @hw: link to hardware-specific clock data
 *
 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
{
	WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
			       hw));
}
EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);

/*
 * clkdev helpers
 */

void __clk_put(struct clk *clk)
{
	struct module *owner;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	/*
	 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
	 * given user should be balanced with calls to clk_rate_exclusive_put()
	 * and by that same consumer
	 */
	if (WARN_ON(clk->exclusive_count)) {
		/* We voiced our concern, let's sanitize the situation */
		clk->core->protect_count -= (clk->exclusive_count - 1);
		clk_core_rate_unprotect(clk->core);
		clk->exclusive_count = 0;
	}

	hlist_del(&clk->clks_node);
	if (clk->min_rate > clk->core->req_rate ||
	    clk->max_rate < clk->core->req_rate)
		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);

	owner = clk->core->owner;
	kref_put(&clk->core->ref, __clk_release);

	clk_prepare_unlock();

	module_put(owner);

	free_clk(clk);
}

/*** clk rate change notifiers ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes. This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon. The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * this would cause a nested acquisition of the prepare_lock mutex.
 *
 * In all notification cases (pre, post and abort rate change) the original
 * clock rate is passed to the callback via struct clk_notifier_data.old_rate
 * and the new frequency is passed via struct clk_notifier_data.new_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			goto found;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	cn = kzalloc(sizeof(*cn), GFP_KERNEL);
	if (!cn)
		goto out;

	cn->clk = clk;
	srcu_init_notifier_head(&cn->notifier_head);

	list_add(&cn->node, &clk_notifier_list);

found:
	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->core->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
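
/*
 * Example (illustrative sketch): a consumer reacting to rate changes. The
 * callback and block names are hypothetical; the action values and
 * struct clk_notifier_data are the real notifier API.
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		switch (action) {
 *		case PRE_RATE_CHANGE:
 *			// prepare for ndata->new_rate; may veto with NOTIFY_STOP
 *			return NOTIFY_OK;
 *		case POST_RATE_CHANGE:
 *			// the rate is now ndata->new_rate
 *			return NOTIFY_OK;
 *		case ABORT_RATE_CHANGE:
 *			// undo anything done at PRE_RATE_CHANGE
 *			return NOTIFY_OK;
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 *
 *	static struct notifier_block foo_clk_nb = {
 *		.notifier_call = foo_clk_notify,
 *	};
 *
 *	// in probe, with a struct clk *clk already acquired:
 *	// ret = clk_notifier_register(clk, &foo_clk_nb);
 */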

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk'; this also frees the
 * memory allocated in clk_notifier_register().
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOENT;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk == clk) {
			ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

			clk->core->notifier_count--;

			/* XXX the notifier code should handle this better */
			if (!cn->notifier_head.head) {
				srcu_cleanup_notifier_head(&cn->notifier_head);
				list_del(&cn->node);
				kfree(cn);
			}
			break;
		}
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);

#ifdef CONFIG_OF
static void clk_core_reparent_orphans(void)
{
	clk_prepare_lock();
	clk_core_reparent_orphans_nolock();
	clk_prepare_unlock();
}

/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback. Returns NULL or a struct clk for the
 *       given clock specifier
 * @get_hw: Get clk_hw callback. Returns NULL, ERR_PTR or a
 *       struct clk_hw for the given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

extern struct of_device_id __clk_of_table;
static const struct of_device_id __clk_of_table_sentinel
	__used __section("__clk_of_table_end");

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				  void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);

struct clk_hw *
of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_hw_onecell_data *hw_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= hw_data->num) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return hw_data->hws[idx];
}
EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 *
 * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clock from %pOF\n", np);

	clk_core_reparent_orphans();

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);

/**
 * of_clk_add_hw_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @get: callback for decoding clk_hw
 * @data: context pointer for @get callback.
 */
int of_clk_add_hw_provider(struct device_node *np,
			   struct clk_hw *(*get)(struct of_phandle_args *clkspec,
						 void *data),
			   void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get_hw = get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clk_hw provider from %pOF\n", np);

	clk_core_reparent_orphans();

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
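
/*
 * Example (illustrative sketch): a provider exposing several clocks via the
 * one-cell binding, using of_clk_hw_onecell_get() from above. NR_FOO_CLKS
 * and the hws[] initialization are hypothetical.
 *
 *	struct clk_hw_onecell_data *data;
 *
 *	data = kzalloc(struct_size(data, hws, NR_FOO_CLKS), GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	data->num = NR_FOO_CLKS;
 *	// ... register each clk_hw and store it in data->hws[i] ...
 *
 *	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, data);
 *
 * Consumers then reference individual clocks as <&provider index> in DT.
 */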

static void devm_of_clk_release_provider(struct device *dev, void *res)
{
	of_clk_del_provider(*(struct device_node **)res);
}

/*
 * We allow a child device to use its parent device as the clock provider node
 * for cases like MFD sub-devices where the child device driver wants to use
 * devm_*() APIs but not list the device in DT as a sub-node.
 */
static struct device_node *get_clk_provider_node(struct device *dev)
{
	struct device_node *np, *parent_np;

	np = dev->of_node;
	parent_np = dev->parent ? dev->parent->of_node : NULL;

	if (!of_find_property(np, "#clock-cells", NULL))
		if (of_find_property(parent_np, "#clock-cells", NULL))
			np = parent_np;

	return np;
}

/**
 * devm_of_clk_add_hw_provider() - Managed clk provider node registration
 * @dev: Device acting as the clock provider (used for DT node and lifetime)
 * @get: callback for decoding clk_hw
 * @data: context pointer for @get callback
 *
 * Registers a clock provider for the given device's node. If the device has
 * no DT node, or if its node lacks clock provider information (#clock-cells),
 * then the parent device's node is scanned for this information. If the
 * parent node has #clock-cells, it is used for registration. The provider is
 * automatically released at device exit.
 *
 * Return: 0 on success or an errno on failure.
 */
int devm_of_clk_add_hw_provider(struct device *dev,
				struct clk_hw *(*get)(struct of_phandle_args *clkspec,
						      void *data),
				void *data)
{
	struct device_node **ptr, *np;
	int ret;

	ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	np = get_clk_provider_node(dev);
	ret = of_clk_add_hw_provider(np, get, data);
	if (!ret) {
		*ptr = np;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
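
/*
 * Example (illustrative sketch): the managed variant is convenient for MFD
 * sub-devices whose clocks hang off the parent's DT node; no explicit
 * of_clk_del_provider() is needed on the error or unbind paths:
 *
 *	ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
 *					  data);
 *	if (ret)
 *		return ret;
 *
 * The DT node is picked by get_clk_provider_node(), falling back to the
 * parent device's node when the child has no #clock-cells of its own.
 */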

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

static int devm_clk_provider_match(struct device *dev, void *res, void *data)
{
	struct device_node **np = res;

	if (WARN_ON(!np || !*np))
		return 0;

	return *np == data;
}

/**
 * devm_of_clk_del_provider() - Remove clock provider registered using devm
 * @dev: Device to whose lifetime the clock provider was bound
 */
void devm_of_clk_del_provider(struct device *dev)
{
	int ret;
	struct device_node *np = get_clk_provider_node(dev);

	ret = devres_release(dev, devm_of_clk_release_provider,
			     devm_clk_provider_match, np);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_of_clk_del_provider);

/**
 * of_parse_clkspec() - Parse a DT clock specifier for a given device node
 * @np: device node to parse clock specifier from
 * @index: index of phandle to parse clock out of. If index < 0, @name is used
 * @name: clock name to find and parse. If name is NULL, the index is used
 * @out_args: Result of parsing the clock specifier
 *
 * Parses a device node's "clocks" and "clock-names" properties to find the
 * phandle and cells for the index or name that is desired. The resulting clock
 * specifier is placed into @out_args, or an errno is returned when there's a
 * parsing error. The @index argument is ignored if @name is non-NULL.
 *
 * Example:
 *
 * phandle1: clock-controller@1 {
 *	#clock-cells = <2>;
 * }
 *
 * phandle2: clock-controller@2 {
 *	#clock-cells = <1>;
 * }
 *
 * clock-consumer@3 {
 *	clocks = <&phandle1 1 2 &phandle2 3>;
 *	clock-names = "name1", "name2";
 * }
 *
 * To get a device_node for `clock-controller@2' node you may call this
 * function a few different ways:
 *
 *   of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
 *   of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
 *   of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
 *
 * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT
 * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in
 * the "clock-names" property of @np.
 */
static int of_parse_clkspec(const struct device_node *np, int index,
			    const char *name, struct of_phandle_args *out_args)
{
	int ret = -ENOENT;

	/* Walk up the tree of devices looking for a clock property that matches */
	while (np) {
		/*
		 * For named clocks, first look up the name in the
		 * "clock-names" property. If it cannot be found, then index
		 * will be an error code and of_parse_phandle_with_args() will
		 * return -EINVAL.
		 */
		if (name)
			index = of_property_match_string(np, "clock-names", name);
		ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
						 index, out_args);
		if (!ret)
			break;
		if (name && index >= 0)
			break;

		/*
		 * No matching clock found on this node. If the parent node
		 * has a "clock-ranges" property, then we can try one of its
		 * clocks.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "clock-ranges", NULL))
			break;
		index = 0;
	}

	return ret;
}

static struct clk_hw *
__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
			      struct of_phandle_args *clkspec)
{
	struct clk *clk;

	if (provider->get_hw)
		return provider->get_hw(clkspec, provider->data);

	clk = provider->get(clkspec, provider->data);
	if (IS_ERR(clk))
		return ERR_CAST(clk);
	return __clk_get_hw(clk);
}

static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
	struct of_clk_provider *provider;
	struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);

	if (!clkspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np) {
			hw = __of_clk_get_hw_from_provider(provider, clkspec);
			if (!IS_ERR(hw))
				break;
		}
	}
	mutex_unlock(&of_clk_mutex);

	return hw;
}

/**
 * of_clk_get_from_provider() - Lookup a clock from a clock provider
 * @clkspec: pointer to a clock specifier data structure
 *
 * This function looks up a struct clk from the registered list of clock
 * providers; the input is a clock specifier data structure as returned
 * from the of_parse_phandle_with_args() function call.
 */
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);

	return clk_hw_create_clk(NULL, hw, NULL, __func__);
}
EXPORT_SYMBOL_GPL(of_clk_get_from_provider);

struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
			     const char *con_id)
{
	int ret;
	struct clk_hw *hw;
	struct of_phandle_args clkspec;

	ret = of_parse_clkspec(np, index, con_id, &clkspec);
	if (ret)
		return ERR_PTR(ret);

	hw = of_clk_get_hw_from_clkspec(&clkspec);
	of_node_put(clkspec.np);

	return hw;
}

static struct clk *__of_clk_get(struct device_node *np,
				int index, const char *dev_id,
				const char *con_id)
{
	struct clk_hw *hw = of_clk_get_hw(np, index, con_id);

	return clk_hw_create_clk(NULL, hw, dev_id, con_id);
}

struct clk *of_clk_get(struct device_node *np, int index)
{
	return __of_clk_get(np, index, np->full_name, NULL);
}
EXPORT_SYMBOL(of_clk_get);

/**
 * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
 * @np: pointer to clock consumer node
 * @name: name of consumer's clock input, or NULL for the first clock reference
 *
 * This function parses the clocks and clock-names properties,
 * and uses them to look up the struct clk from the registered list of clock
 * providers.
 */
struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
{
	if (!np)
		return ERR_PTR(-ENOENT);

	return __of_clk_get(np, 0, np->full_name, name);
}
EXPORT_SYMBOL(of_clk_get_by_name);
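
/*
 * Example (illustrative sketch): using the DT fragment shown in the
 * of_parse_clkspec() kernel-doc above, a consumer driver could look up and
 * enable its "name2" clock like so (error handling trimmed):
 *
 *	struct clk *clk;
 *
 *	clk = of_clk_get_by_name(np, "name2");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_prepare_enable(clk);
 *	// ... later: clk_disable_unprepare(clk); clk_put(clk);
 */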

/**
 * of_clk_get_parent_count() - Count the number of clocks a device node has
 * @np: device node to count
 *
 * Returns: The number of clocks that are possible parents of this node
 */
unsigned int of_clk_get_parent_count(const struct device_node *np)
{
	int count;

	count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
	if (count < 0)
		return 0;

	return count;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);

const char *of_clk_get_parent_name(const struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct property *prop;
	const char *clk_name;
	const __be32 *vp;
	u32 pv;
	int rc;
	int count;
	struct clk *clk;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	index = clkspec.args_count ? clkspec.args[0] : 0;
	count = 0;

	/* if there is an indices property, use it to transfer the index
	 * specified into an array offset for the clock-output-names property.
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}
	/* We went off the end of 'clock-indices' without finding it */
	if (prop && !vp)
		return NULL;

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index,
					  &clk_name) < 0) {
		/*
		 * Best effort to get the name if the clock has been
		 * registered with the framework. If the clock isn't
		 * registered, we return the node name as the name of
		 * the clock as long as #clock-cells = 0.
		 */
		clk = of_clk_get_from_provider(&clkspec);
		if (IS_ERR(clk)) {
			if (clkspec.args_count == 0)
				clk_name = clkspec.np->name;
			else
				clk_name = NULL;
		} else {
			clk_name = __clk_get_name(clk);
			clk_put(clk);
		}
	}

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

/**
 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
 * number of parents
 * @np: Device node pointer associated with clock provider
 * @parents: pointer to char array that hold the parents' names
 * @size: size of the @parents array
 *
 * Return: number of parents for the clock node.
 */
int of_clk_parent_fill(struct device_node *np, const char **parents,
		       unsigned int size)
{
	unsigned int i = 0;

	while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
		i++;

	return i;
}
EXPORT_SYMBOL_GPL(of_clk_parent_fill);
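
/*
 * Example (illustrative sketch): a provider with up to two parents can
 * populate the parent_names of its clk_init_data like so:
 *
 *	const char *parents[2];
 *	struct clk_init_data init = { };
 *	int num;
 *
 *	num = of_clk_parent_fill(np, parents, ARRAY_SIZE(parents));
 *	init.parent_names = parents;
 *	init.num_parents = num;
 *
 * The array may live on the stack: clk_core_populate_parent_map() copies
 * the names during registration, as noted in its "might be __initdata"
 * comment above.
 */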

struct clock_provider {
	void (*clk_init_cb)(struct device_node *);
	struct device_node *np;
	struct list_head node;
};

/*
 * This function looks for a parent clock. If there is one, then it
 * checks that the provider for this parent clock was initialized, in
 * which case the parent clock will be ready.
 */
static int parent_ready(struct device_node *np)
{
	int i = 0;

	while (true) {
		struct clk *clk = of_clk_get(np, i);

		/* this parent is ready, so we can check the next one */
		if (!IS_ERR(clk)) {
			clk_put(clk);
			i++;
			continue;
		}

		/* at least one parent is not ready, so we exit now */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return 0;

		/*
		 * Here we assume that the device tree is written
		 * correctly, so any other error means that there are no
		 * more parents. As we didn't exit above, all previous
		 * parents are ready. If there are no clock parents at
		 * all, there is nothing to wait for, so we can consider
		 * their absence as being ready.
		 */
		return 1;
	}
}

/**
 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
 * @np: Device node pointer associated with clock provider
 * @index: clock index
 * @flags: pointer to top-level framework flags
 *
 * Detects if the clock-critical property exists and, if so, sets the
 * corresponding CLK_IS_CRITICAL flag.
 *
 * Do not use this function. It exists only for legacy Device Tree
 * bindings, such as the now-outdated one-clock-per-node style. Those
 * bindings typically put all clock data into .dts and the Linux driver
 * has no clock data, thus making it impossible to set this flag
 * correctly from the driver. Only drivers for such bindings may call
 * of_clk_detect_critical() from their setup functions.
 *
 * Return: error code or zero on success
 */
int of_clk_detect_critical(struct device_node *np, int index,
			   unsigned long *flags)
{
	struct property *prop;
	const __be32 *cur;
	uint32_t idx;

	if (!np || !flags)
		return -EINVAL;

	of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
		if (index == idx)
			*flags |= CLK_IS_CRITICAL;

	return 0;
}

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers
 * and calls their initialization functions. It tries to follow the
 * dependencies, initializing a provider only once its parent clocks
 * are ready (or once no further progress can be made).
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;
	LIST_HEAD(clk_provider_list);

	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clocks providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent;

		if (!of_device_is_available(np))
			continue;

		parent = kzalloc(sizeof(*parent), GFP_KERNEL);
		if (!parent) {
			list_for_each_entry_safe(clk_provider, next,
						 &clk_provider_list, node) {
				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
			}
			of_node_put(np);
			return;
		}

		parent->clk_init_cb = match->data;
		parent->np = of_node_get(np);
		list_add_tail(&parent->node, &clk_provider_list);
	}

	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					 &clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {

				/* Don't populate platform devices */
				of_node_set_flag(clk_provider->np,
						 OF_POPULATED);

				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * We didn't manage to initialize any of the
		 * remaining providers during the last loop, so now we
		 * initialize all the remaining ones unconditionally
		 * in case the clock parent was not mandatory
		 */
		if (!is_init_done)
			force = true;
	}
}
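
/*
 * Example (illustrative sketch): providers matched by of_clk_init() are
 * normally declared with CLK_OF_DECLARE, which places an of_device_id in
 * __clk_of_table. The compatible string and init function are hypothetical:
 *
 *	static void __init foo_clk_init(struct device_node *np)
 *	{
 *		// register clk_hws, then of_clk_add_hw_provider(np, ...)
 *	}
 *	CLK_OF_DECLARE(foo_clk, "vendor,foo-clock", foo_clk_init);
 */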
#endif