
Lines matching the full-text search terms "protected", "-", "clocks", excerpted with their original line numbers from the Linux common clock framework core (drivers/clk/clk.c).

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
4 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
6 * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
10 #include <linux/clk-provider.h>
11 #include <linux/clk/clk-conf.h>
113 if (!core->rpm_enabled)
116 ret = pm_runtime_get_sync(core->dev);
118 pm_runtime_put_noidle(core->dev);
126 if (!core->rpm_enabled)
129 pm_runtime_put_sync(core->dev);
153 if (--prepare_refcnt)
193 if (--enable_refcnt) {
203 return core->protect_count;
211 * .is_prepared is optional for clocks that can prepare
214 if (!core->ops->is_prepared)
215 return core->prepare_count;
218 ret = core->ops->is_prepared(core->hw);
230 * .is_enabled is only mandatory for clocks that gate
233 if (!core->ops->is_enabled)
234 return core->enable_count;
246 if (core->rpm_enabled) {
247 pm_runtime_get_noresume(core->dev);
248 if (!pm_runtime_active(core->dev)) {
254 ret = core->ops->is_enabled(core->hw);
256 if (core->rpm_enabled)
257 pm_runtime_put(core->dev);
266 return !clk ? NULL : clk->core->name;
272 return hw->core->name;
278 return !clk ? NULL : clk->core->hw;
284 return hw->core->num_parents;
290 return hw->core->parent ? hw->core->parent->hw : NULL;
300 if (!strcmp(core->name, name))
303 hlist_for_each_entry(child, &core->children, child_node) {
347 return -ENOENT;
352 return ERR_PTR(-ENOENT);
357 * clk_core_get - Find the clk_core parent of a clk
364 * node's 'clock-names' property or as the 'con_id' matching the device's
369 * clock-controller@c001 that has a clk_init_data::parent_data array
371 * clock-controller@f00abcd without needing to get the globally unique name of
374 * parent: clock-controller@f00abcd {
376 * #clock-cells = <0>;
379 * clock-controller@c001 {
381 * clocks = <&parent>;
382 * clock-names = "xtal";
383 * #clock-cells = <1>;
386 * Returns: -ENOENT when the provider can't be found or the clk doesn't
394 const char *name = core->parents[p_index].fw_name;
395 int index = core->parents[p_index].index;
396 struct clk_hw *hw = ERR_PTR(-ENOENT);
397 struct device *dev = core->dev;
399 struct device_node *np = core->of_node;
417 return hw->core;
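To make the fw_name/index parent lookup described above concrete, here is a minimal provider-side sketch (hypothetical names, not taken from clk.c): the "xtal" input of the clock-controller@c001 example is described through clk_init_data::parent_data, so the framework can resolve the parent via the consumer node's clocks/clock-names properties instead of a globally unique clock name.

#include <linux/kernel.h>
#include <linux/clk-provider.h>

/* Hypothetical parent description matching clock-names = "xtal" above */
static const struct clk_parent_data xtal_parent_data[] = {
	{ .fw_name = "xtal", .index = 0 },
};

static const struct clk_init_data my_gate_init = {
	.name		= "my_gate",		/* illustrative name */
	.ops		= &clk_gate_ops,
	.parent_data	= xtal_parent_data,
	.num_parents	= ARRAY_SIZE(xtal_parent_data),
};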
422 struct clk_parent_map *entry = &core->parents[index];
423 struct clk_core *parent = ERR_PTR(-ENOENT);
425 if (entry->hw) {
426 parent = entry->hw->core;
433 parent = ERR_PTR(-EPROBE_DEFER);
436 if (PTR_ERR(parent) == -ENOENT && entry->name)
437 parent = clk_core_lookup(entry->name);
442 entry->core = parent;
448 if (!core || index >= core->num_parents || !core->parents)
451 if (!core->parents[index].core)
454 return core->parents[index].core;
462 parent = clk_core_get_parent_by_index(hw->core, index);
464 return !parent ? NULL : parent->hw;
470 return !clk ? 0 : clk->core->enable_count;
478 if (!core->num_parents || core->parent)
479 return core->rate;
491 return clk_core_get_rate_nolock(hw->core);
500 return core->accuracy;
505 return hw->core->flags;
511 return clk_core_is_prepared(hw->core);
517 return clk_core_rate_is_protected(hw->core);
523 return clk_core_is_enabled(hw->core);
532 return clk_core_is_enabled(clk->core);
540 return abs(now - rate) < abs(best - rate);
549 struct clk_core *core = hw->core, *parent, *best_parent = NULL;
555 if (core->flags & CLK_SET_RATE_NO_REPARENT) {
556 parent = core->parent;
557 if (core->flags & CLK_SET_RATE_PARENT) {
558 ret = __clk_determine_rate(parent ? parent->hw : NULL,
574 num_parents = core->num_parents;
580 if (core->flags & CLK_SET_RATE_PARENT) {
582 ret = __clk_determine_rate(parent->hw, &parent_req);
589 if (mux_is_better_rate(req->rate, parent_req.rate,
597 return -EINVAL;
601 req->best_parent_hw = best_parent->hw;
602 req->best_parent_rate = best;
603 req->rate = best;
613 return !core ? NULL : core->hw->clk;
624 *min_rate = core->min_rate;
625 *max_rate = core->max_rate;
627 hlist_for_each_entry(clk_user, &core->clks, clks_node)
628 *min_rate = max(*min_rate, clk_user->min_rate);
630 hlist_for_each_entry(clk_user, &core->clks, clks_node)
631 *max_rate = min(*max_rate, clk_user->max_rate);
642 if (min_rate > core->max_rate || max_rate < core->min_rate)
645 hlist_for_each_entry(user, &core->clks, clks_node)
646 if (min_rate > user->max_rate || max_rate < user->min_rate)
655 hw->core->min_rate = min_rate;
656 hw->core->max_rate = max_rate;
661 * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
669 * Returns: 0 on success, -EERROR value on error
694 if (WARN(core->protect_count == 0,
695 "%s already unprotected\n", core->name))
698 if (--core->protect_count > 0)
701 clk_core_rate_unprotect(core->parent);
711 return -EINVAL;
713 if (core->protect_count == 0)
716 ret = core->protect_count;
717 core->protect_count = 1;
724 * clk_rate_exclusive_put - release exclusivity over clock rate control
729 * clock which could result in a rate change or rate glitch. Exclusive clocks
731 * further up the parent chain of clocks. As a result, clocks up parent chain
752 if (WARN_ON(clk->exclusive_count <= 0))
755 clk_core_rate_unprotect(clk->core);
756 clk->exclusive_count--;
769 if (core->protect_count == 0)
770 clk_core_rate_protect(core->parent);
772 core->protect_count++;
786 core->protect_count = count;
790 * clk_rate_exclusive_get - get exclusivity over the clk rate control
795 * clock which could result in a rate change or rate glitch. Exclusive clocks
797 * further up the parent chain of clocks. As a result, clocks up parent chain
805 * Returns 0 on success, -EERROR otherwise
813 clk_core_rate_protect(clk->core);
814 clk->exclusive_count++;
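A minimal consumer-side sketch of the exclusivity API quoted above (hypothetical function and rate value, assuming a struct clk * obtained earlier with clk_get() or devm_clk_get()): hold the rate while rate-sensitive work is in flight, then release it so other consumers may change it again.

#include <linux/clk.h>

static int do_rate_sensitive_work(struct clk *clk)
{
	int ret;

	ret = clk_rate_exclusive_get(clk);	/* guard against rate/reparent glitches */
	if (ret)
		return ret;

	ret = clk_set_rate(clk, 100000000);	/* 100 MHz, illustrative value */
	if (!ret) {
		/* ... work that relies on the rate staying put ... */
	}

	clk_rate_exclusive_put(clk);		/* balanced with _get() above */
	return ret;
}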
828 if (WARN(core->prepare_count == 0,
829 "%s already unprepared\n", core->name))
832 if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
833 "Unpreparing critical %s\n", core->name))
836 if (core->flags & CLK_SET_RATE_GATE)
839 if (--core->prepare_count > 0)
842 WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);
846 if (core->ops->unprepare)
847 core->ops->unprepare(core->hw);
850 clk_core_unprepare(core->parent);
862 * clk_unprepare - undo preparation of a clock source
877 clk_core_unprepare_lock(clk->core);
890 if (core->prepare_count == 0) {
895 ret = clk_core_prepare(core->parent);
901 if (core->ops->prepare)
902 ret = core->ops->prepare(core->hw);
910 core->prepare_count++;
919 if (core->flags & CLK_SET_RATE_GATE)
924 clk_core_unprepare(core->parent);
942 * clk_prepare - prepare a clock source
951 * Returns 0 on success, -EERROR otherwise.
958 return clk_core_prepare_lock(clk->core);
969 if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
972 if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
973 "Disabling critical %s\n", core->name))
976 if (--core->enable_count > 0)
981 if (core->ops->disable)
982 core->ops->disable(core->hw);
986 clk_core_disable(core->parent);
999 * clk_disable - gate a clock
1005 * SoC-internal clk which is controlled via simple register writes. In the
1015 clk_core_disable_lock(clk->core);
1028 if (WARN(core->prepare_count == 0,
1029 "Enabling unprepared %s\n", core->name))
1030 return -ESHUTDOWN;
1032 if (core->enable_count == 0) {
1033 ret = clk_core_enable(core->parent);
1040 if (core->ops->enable)
1041 ret = core->ops->enable(core->hw);
1046 clk_core_disable(core->parent);
1051 core->enable_count++;
1068 * clk_gate_restore_context - restore context for poweroff
1072 * the gate clocks based on the enable_count. This is done in cases
1075 * helps restore the state of gate clocks.
1079 struct clk_core *core = hw->core;
1081 if (core->enable_count)
1082 core->ops->enable(hw);
1084 core->ops->disable(hw);
1093 hlist_for_each_entry(child, &core->children, child_node) {
1099 if (core->ops && core->ops->save_context)
1100 ret = core->ops->save_context(core->hw);
1109 if (core->ops && core->ops->restore_context)
1110 core->ops->restore_context(core->hw);
1112 hlist_for_each_entry(child, &core->children, child_node)
1117 * clk_save_context - save clock context for poweroff
1145 * clk_restore_context - restore clock context after poweroff
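As a usage illustration of clk_save_context()/clk_restore_context() (a hypothetical platform hook, not from clk.c): the pair is typically called from late suspend/early resume paths when the clock controller itself loses power.

#include <linux/clk.h>
#include <linux/pm.h>

static int my_platform_suspend_noirq(struct device *dev)
{
	/* record gate state and rates before the controller powers off */
	return clk_save_context();
}

static int my_platform_resume_noirq(struct device *dev)
{
	/* reprogram gates/rates from the saved context */
	clk_restore_context();
	return 0;
}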
1163 * clk_enable - ungate a clock
1168 * if the operation will never sleep. One example is a SoC-internal clk which
1172 * must be called before clk_enable. Returns 0 on success, -EERROR
1180 return clk_core_enable_lock(clk->core);
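A consumer-side sketch of the prepare/enable pairing excerpted above (hypothetical code, assuming a struct clk * already obtained): clk_prepare() may sleep while clk_enable() must not, so from non-atomic contexts drivers usually use the combined helpers.

#include <linux/clk.h>

static int power_on_block(struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);		/* clk_prepare() + clk_enable() */
	if (ret)
		return ret;

	/* ... the hardware is clocked here ... */

	clk_disable_unprepare(clk);		/* clk_disable() + clk_unprepare() */
	return 0;
}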
1211 hlist_for_each_entry(child, &core->children, child_node)
1214 if (core->prepare_count)
1217 if (core->flags & CLK_IGNORE_UNUSED)
1225 if (core->ops->unprepare_unused)
1226 core->ops->unprepare_unused(core->hw);
1227 else if (core->ops->unprepare)
1228 core->ops->unprepare(core->hw);
1242 hlist_for_each_entry(child, &core->children, child_node)
1245 if (core->flags & CLK_OPS_PARENT_ENABLE)
1246 clk_core_prepare_enable(core->parent);
1253 if (core->enable_count)
1256 if (core->flags & CLK_IGNORE_UNUSED)
1260 * some gate clocks have special needs during the disable-unused
1266 if (core->ops->disable_unused)
1267 core->ops->disable_unused(core->hw);
1268 else if (core->ops->disable)
1269 core->ops->disable(core->hw);
1277 if (core->flags & CLK_OPS_PARENT_ENABLE)
1278 clk_core_disable_unprepare(core->parent);
1294 pr_warn("clk: Not disabling unused clocks\n");
1330 * - if the provider is not protected at all
1331 * - if the calling consumer is the only one which has exclusivity
1335 req->rate = core->rate;
1336 } else if (core->ops->determine_rate) {
1337 return core->ops->determine_rate(core->hw, req);
1338 } else if (core->ops->round_rate) {
1339 rate = core->ops->round_rate(core->hw, req->rate,
1340 &req->best_parent_rate);
1344 req->rate = rate;
1346 return -EINVAL;
1360 parent = core->parent;
1362 req->best_parent_hw = parent->hw;
1363 req->best_parent_rate = parent->rate;
1365 req->best_parent_hw = NULL;
1366 req->best_parent_rate = 0;
1372 return core->ops->determine_rate || core->ops->round_rate;
1381 req->rate = 0;
1389 else if (core->flags & CLK_SET_RATE_PARENT)
1390 return clk_core_round_rate_nolock(core->parent, req);
1392 req->rate = core->rate;
1397 * __clk_determine_rate - get the closest rate actually supported by a clock
1406 req->rate = 0;
1410 return clk_core_round_rate_nolock(hw->core, req);
1415 * clk_hw_round_rate() - round the given rate for a hw clk
1434 clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
1437 ret = clk_core_round_rate_nolock(hw->core, &req);
1446 * clk_round_rate - round the given rate for a clk
1464 if (clk->exclusive_count)
1465 clk_core_rate_unprotect(clk->core);
1467 clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
1470 ret = clk_core_round_rate_nolock(clk->core, &req);
1472 if (clk->exclusive_count)
1473 clk_core_rate_protect(clk->core);
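A small consumer sketch of clk_round_rate() (hypothetical helper): ask the framework what it would actually deliver for a target rate before committing to it with clk_set_rate().

#include <linux/clk.h>

static int set_closest_rate(struct clk *clk, unsigned long target)
{
	long rounded = clk_round_rate(clk, target);	/* no hardware change yet */

	if (rounded < 0)
		return rounded;

	return clk_set_rate(clk, rounded);
}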
1485 * __clk_notify - call clk notifier chain
1491 * Triggers a notifier call chain on the clk rate-change notification
1509 if (cn->clk->core == core) {
1510 cnd.clk = cn->clk;
1511 ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
1537 if (core->parent)
1538 parent_accuracy = core->parent->accuracy;
1540 if (core->ops->recalc_accuracy)
1541 core->accuracy = core->ops->recalc_accuracy(core->hw,
1544 core->accuracy = parent_accuracy;
1546 hlist_for_each_entry(child, &core->children, child_node)
1552 if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
1559 * clk_get_accuracy - return the accuracy of clk
1575 accuracy = clk_core_get_accuracy_recalc(clk->core);
1587 if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
1588 rate = core->ops->recalc_rate(core->hw, parent_rate);
1614 old_rate = core->rate;
1616 if (core->parent)
1617 parent_rate = core->parent->rate;
1619 core->rate = clk_recalc(core, parent_rate);
1625 if (core->notifier_count && msg)
1626 __clk_notify(core, msg, old_rate, core->rate);
1628 hlist_for_each_entry(child, &core->children, child_node)
1634 if (core && (core->flags & CLK_GET_RATE_NOCACHE))
1641 * clk_get_rate - return the rate of clk
1656 rate = clk_core_get_rate_recalc(clk->core);
1669 return -EINVAL;
1671 for (i = 0; i < core->num_parents; i++) {
1673 if (core->parents[i].core == parent)
1677 if (core->parents[i].core)
1681 if (core->parents[i].hw) {
1682 if (core->parents[i].hw == parent->hw)
1694 if (core->parents[i].name &&
1695 !strcmp(parent->name, core->parents[i].name))
1699 if (i == core->num_parents)
1700 return -EINVAL;
1702 core->parents[i].core = parent;
1707 * clk_hw_get_parent_index - return the index of the parent clock
1710 * Fetches and returns the index of parent clock. Returns -EINVAL if the given
1718 return -EINVAL;
1720 return clk_fetch_parent_index(hw->core, parent->core);
1731 core->orphan = is_orphan;
1733 hlist_for_each_entry(child, &core->children, child_node)
1739 bool was_orphan = core->orphan;
1741 hlist_del(&core->child_node);
1744 bool becomes_orphan = new_parent->orphan;
1747 if (new_parent->new_child == core)
1748 new_parent->new_child = NULL;
1750 hlist_add_head(&core->child_node, &new_parent->children);
1755 hlist_add_head(&core->child_node, &clk_orphan_list);
1760 core->parent = new_parent;
1767 struct clk_core *old_parent = core->parent;
1790 if (core->flags & CLK_OPS_PARENT_ENABLE) {
1796 if (core->prepare_count) {
1817 if (core->prepare_count) {
1822 /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
1823 if (core->flags & CLK_OPS_PARENT_ENABLE) {
1841 if (parent && core->ops->set_parent)
1842 ret = core->ops->set_parent(core->hw, p_index);
1869 * pre-rate change notifications and returns early if no clks in the
1886 if (core->notifier_count)
1887 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
1891 __func__, core->name, ret);
1895 hlist_for_each_entry(child, &core->children, child_node) {
1910 core->new_rate = new_rate;
1911 core->new_parent = new_parent;
1912 core->new_parent_index = p_index;
1914 core->new_child = NULL;
1915 if (new_parent && new_parent != core->parent)
1916 new_parent->new_child = core;
1918 hlist_for_each_entry(child, &core->children, child_node) {
1919 child->new_rate = clk_recalc(child, new_rate);
1920 clk_calc_subtree(child, child->new_rate, NULL, 0);
1945 parent = old_parent = core->parent;
1947 best_parent_rate = parent->rate;
1967 parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
1971 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
1972 /* pass-through clock without adjustable parent */
1973 core->new_rate = core->rate;
1976 /* pass-through clock with adjustable parent */
1978 new_rate = parent->new_rate;
1982 /* some clocks must be gated to change parent */
1984 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
1986 __func__, core->name);
1991 if (parent && core->num_parents > 1) {
1995 __func__, parent->name, core->name);
2000 if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
2001 best_parent_rate != parent->rate)
2021 if (core->rate == core->new_rate)
2024 if (core->notifier_count) {
2025 ret = __clk_notify(core, event, core->rate, core->new_rate);
2030 hlist_for_each_entry(child, &core->children, child_node) {
2032 if (child->new_parent && child->new_parent != core)
2039 /* handle the new child who might not be in core->children yet */
2040 if (core->new_child) {
2041 tmp_clk = clk_propagate_rate_change(core->new_child, event);
2063 old_rate = core->rate;
2065 if (core->new_parent) {
2066 parent = core->new_parent;
2067 best_parent_rate = core->new_parent->rate;
2068 } else if (core->parent) {
2069 parent = core->parent;
2070 best_parent_rate = core->parent->rate;
2076 if (core->flags & CLK_SET_RATE_UNGATE) {
2085 if (core->new_parent && core->new_parent != core->parent) {
2086 old_parent = __clk_set_parent_before(core, core->new_parent);
2087 trace_clk_set_parent(core, core->new_parent);
2089 if (core->ops->set_rate_and_parent) {
2091 core->ops->set_rate_and_parent(core->hw, core->new_rate,
2093 core->new_parent_index);
2094 } else if (core->ops->set_parent) {
2095 core->ops->set_parent(core->hw, core->new_parent_index);
2098 trace_clk_set_parent_complete(core, core->new_parent);
2099 __clk_set_parent_after(core, core->new_parent, old_parent);
2102 if (core->flags & CLK_OPS_PARENT_ENABLE)
2105 trace_clk_set_rate(core, core->new_rate);
2107 if (!skip_set_rate && core->ops->set_rate)
2108 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
2110 trace_clk_set_rate_complete(core, core->new_rate);
2112 core->rate = clk_recalc(core, best_parent_rate);
2114 if (core->flags & CLK_SET_RATE_UNGATE) {
2123 if (core->flags & CLK_OPS_PARENT_ENABLE)
2126 if (core->notifier_count && old_rate != core->rate)
2127 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
2129 if (core->flags & CLK_RECALC_NEW_RATES)
2130 (void)clk_calc_new_rates(core, core->new_rate);
2136 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
2138 if (child->new_parent && child->new_parent != core)
2143 /* handle the new child who might not be in core->children yet */
2144 if (core->new_child)
2145 clk_change_rate(core->new_child);
2193 /* fail on a direct rate set of a protected provider */
2195 return -EBUSY;
2200 return -EINVAL;
2210 fail_clk->name);
2212 ret = -EBUSY;
2219 core->req_rate = req_rate;
2227 * clk_set_rate - specify a new rate for clk
2243 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
2245 * Returns 0 on success, -EERROR otherwise.
2257 if (clk->exclusive_count)
2258 clk_core_rate_unprotect(clk->core);
2260 ret = clk_core_set_rate_nolock(clk->core, rate);
2262 if (clk->exclusive_count)
2263 clk_core_rate_protect(clk->core);
2272 * clk_set_rate_exclusive - specify a new rate and get exclusive control
2288 * Returns 0 on success, -EERROR otherwise.
2306 ret = clk_core_set_rate_nolock(clk->core, rate);
2308 clk_core_rate_protect(clk->core);
2309 clk->exclusive_count++;
2319 * clk_set_rate_range - set a rate range for a clock source
2336 __func__, clk->core->name, clk->dev_id, clk->con_id,
2338 return -EINVAL;
2343 if (clk->exclusive_count)
2344 clk_core_rate_unprotect(clk->core);
2347 old_min = clk->min_rate;
2348 old_max = clk->max_rate;
2349 clk->min_rate = min;
2350 clk->max_rate = max;
2352 if (!clk_core_check_boundaries(clk->core, min, max)) {
2353 ret = -EINVAL;
2357 rate = clk_core_get_rate_nolock(clk->core);
2364 * usual reason (clock broken, clock protected, etc) but also
2366 * - round_rate() was not favorable and fell on the wrong
2368 * - the determine_rate() callback does not really check for
2377 ret = clk_core_set_rate_nolock(clk->core, rate);
2380 clk->min_rate = old_min;
2381 clk->max_rate = old_max;
2386 if (clk->exclusive_count)
2387 clk_core_rate_protect(clk->core);
2396 * clk_set_min_rate - set a minimum clock rate for a clock source
2407 return clk_set_rate_range(clk, rate, clk->max_rate);
2412 * clk_set_max_rate - set a maximum clock rate for a clock source
2423 return clk_set_rate_range(clk, clk->min_rate, rate);
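A consumer sketch of the range API summarized above (hypothetical bounds): each consumer publishes the band it tolerates and the framework aggregates the requests, as the clk_core_get_boundaries() excerpt earlier shows. clk_set_min_rate() and clk_set_max_rate(), excerpted above, are one-sided wrappers around the same call.

#include <linux/clk.h>

static int constrain_ref_clk(struct clk *clk)
{
	/* accept anything between 19.2 MHz and 38.4 MHz, illustrative bounds */
	return clk_set_rate_range(clk, 19200000, 38400000);
}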
2428 * clk_get_parent - return the parent of a clk
2431 * Simply returns clk->parent. Returns NULL if clk is NULL.
2441 /* TODO: Create a per-user clk and change callers to call clk_put */
2442 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
2453 if (core->num_parents > 1 && core->ops->get_parent)
2454 index = core->ops->get_parent(core->hw);
2472 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
2476 * clk_has_parent - check if a clock is a possible parent for another
2490 /* NULL clocks should be nops, so return success if either is NULL. */
2494 core = clk->core;
2495 parent_core = parent->core;
2498 if (core->parent == parent_core)
2501 for (i = 0; i < core->num_parents; i++)
2502 if (!strcmp(core->parents[i].name, parent_core->name))
2521 if (core->parent == parent)
2524 /* verify ops for multi-parent clks */
2525 if (core->num_parents > 1 && !core->ops->set_parent)
2526 return -EPERM;
2528 /* check that we are allowed to re-parent if the clock is in use */
2529 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
2530 return -EBUSY;
2533 return -EBUSY;
2540 __func__, parent->name, core->name);
2543 p_rate = parent->rate;
2557 /* do the re-parent */
2576 return clk_core_set_parent_nolock(hw->core, parent->core);
2581 * clk_set_parent - switch the parent of a mux clk
2585 * Re-parent clk to use parent as its new input source. If clk is in
2595 * Returns 0 on success, -EERROR otherwise.
2606 if (clk->exclusive_count)
2607 clk_core_rate_unprotect(clk->core);
2609 ret = clk_core_set_parent_nolock(clk->core,
2610 parent ? parent->core : NULL);
2612 if (clk->exclusive_count)
2613 clk_core_rate_protect(clk->core);
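A consumer sketch of re-parenting a mux (hypothetical helper): clk_has_parent() checks the candidate against the possible-parents table before clk_set_parent() attempts the switch.

#include <linux/clk.h>
#include <linux/errno.h>

static int switch_mux_input(struct clk *mux, struct clk *candidate)
{
	if (!clk_has_parent(mux, candidate))
		return -EINVAL;		/* not in the mux's parent table */

	return clk_set_parent(mux, candidate);
}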
2623 int ret = -EINVAL;
2631 return -EBUSY;
2635 if (core->ops->set_phase) {
2636 ret = core->ops->set_phase(core->hw, degrees);
2638 core->phase = degrees;
2647 * clk_set_phase - adjust the phase shift of a clock signal
2652 * degrees. Returns 0 on success, -EERROR otherwise.
2656 * phase locked-loop clock signal generators we may shift phase with
2680 if (clk->exclusive_count)
2681 clk_core_rate_unprotect(clk->core);
2683 ret = clk_core_set_phase_nolock(clk->core, degrees);
2685 if (clk->exclusive_count)
2686 clk_core_rate_protect(clk->core);
2699 if (!core->ops->get_phase)
2703 ret = core->ops->get_phase(core->hw);
2705 core->phase = ret;
2711 * clk_get_phase - return the phase shift of a clock signal
2715 * -EERROR.
2725 ret = clk_core_get_phase(clk->core);
2735 core->duty.num = 1;
2736 core->duty.den = 2;
2743 struct clk_duty *duty = &core->duty;
2746 if (!core->ops->get_duty_cycle)
2749 ret = core->ops->get_duty_cycle(core->hw, duty);
2754 if (duty->den == 0 || duty->num > duty->den) {
2755 ret = -EINVAL;
2770 if (core->parent &&
2771 core->flags & CLK_DUTY_CYCLE_PARENT) {
2772 ret = clk_core_update_duty_cycle_nolock(core->parent);
2773 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2792 return -EBUSY;
2796 if (!core->ops->set_duty_cycle)
2799 ret = core->ops->set_duty_cycle(core->hw, duty);
2801 memcpy(&core->duty, duty, sizeof(*duty));
2813 if (core->parent &&
2814 core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
2815 ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
2816 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2823 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
2843 return -EINVAL;
2850 if (clk->exclusive_count)
2851 clk_core_rate_unprotect(clk->core);
2853 ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
2855 if (clk->exclusive_count)
2856 clk_core_rate_protect(clk->core);
2867 struct clk_duty *duty = &core->duty;
2874 ret = mult_frac(scale, duty->num, duty->den);
2882 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
2894 return clk_core_get_scaled_duty_cycle(clk->core, scale);
2899 * clk_is_match - check if two clk's point to the same hardware clock
2915 /* true if clk->core pointers match. Avoid dereferencing garbage */
2917 if (p->core == q->core)
2944 seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
2946 30 - level * 3, c->name,
2947 c->enable_count, c->prepare_count, c->protect_count,
2955 seq_puts(s, "-----");
2967 hlist_for_each_entry(child, &c->children, child_node)
2974 struct hlist_head **lists = (struct hlist_head **)s->private;
2978 seq_puts(s, "---------------------------------------------------------------------------------------------\n");
3000 seq_printf(s, "\"%s\": { ", c->name);
3001 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
3002 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
3003 seq_printf(s, "\"protect_count\": %d,", c->protect_count);
3021 hlist_for_each_entry(child, &c->children, child_node) {
3033 struct hlist_head **lists = (struct hlist_head **)s->private;
3081 ret = clk_prepare_enable(core->hw->clk);
3083 clk_disable_unprepare(core->hw->clk);
3092 *val = core->enable_count && core->prepare_count;
3108 *val = core->rate;
3136 struct clk_core *core = s->private;
3137 unsigned long flags = core->flags;
3166 * 4. Fetch parent clock's clock-output-name if DT index was set
3174 seq_puts(s, parent->name);
3175 else if (core->parents[i].name)
3176 seq_puts(s, core->parents[i].name);
3177 else if (core->parents[i].fw_name)
3178 seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
3179 else if (core->parents[i].index >= 0)
3181 of_clk_get_parent_name(core->of_node,
3182 core->parents[i].index));
3191 struct clk_core *core = s->private;
3194 for (i = 0; i < core->num_parents - 1; i++)
3205 struct clk_core *core = s->private;
3207 if (core->parent)
3208 seq_printf(s, "%s\n", core->parent->name);
3216 struct clk_core *core = s->private;
3217 struct clk_duty *duty = &core->duty;
3219 seq_printf(s, "%u/%u\n", duty->num, duty->den);
3227 struct clk_core *core = s->private;
3241 struct clk_core *core = s->private;
3260 root = debugfs_create_dir(core->name, pdentry);
3261 core->dentry = root;
3267 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
3268 debugfs_create_u32("clk_phase", 0444, root, &core->phase);
3270 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
3271 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
3272 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
3273 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
3281 if (core->num_parents > 0)
3285 if (core->num_parents > 1)
3289 if (core->ops->debug_init)
3290 core->ops->debug_init(core->hw, core->dentry);
3294 * clk_debug_register - add a clk node to the debugfs clk directory
3304 hlist_add_head(&core->debug_node, &clk_debug_list);
3311 * clk_debug_unregister - remove a clk node from the debugfs clk directory
3315 * debugfs clk directory if clk->dentry points to debugfs created by
3321 hlist_del_init(&core->debug_node);
3322 debugfs_remove_recursive(core->dentry);
3323 core->dentry = NULL;
3328 * clk_debug_init - lazily populate the debugfs clk directory
3332 * populates the debugfs clk directory once at boot-time when we know that
3333 * debugfs is setup. It should only be called once at boot-time, all other clks
3392 * walk the list of orphan clocks and reparent any that newly finds a
3401 * clock. This is important for CLK_IS_CRITICAL clocks, which
3419 * 'req_rate' is set to something non-zero so that
3422 orphan->req_rate = orphan->rate;
3428 * __clk_core_init - initialize the data structures in a struct clk_core
3442 return -EINVAL;
3447 * Set hw->core after grabbing the prepare_lock to synchronize with
3448 * callers of clk_core_fill_parent_index() where we treat hw->core
3452 core->hw->core = core;
3459 if (clk_core_lookup(core->name)) {
3461 __func__, core->name);
3462 ret = -EEXIST;
3466 /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */
3467 if (core->ops->set_rate &&
3468 !((core->ops->round_rate || core->ops->determine_rate) &&
3469 core->ops->recalc_rate)) {
3471 __func__, core->name);
3472 ret = -EINVAL;
3476 if (core->ops->set_parent && !core->ops->get_parent) {
3478 __func__, core->name);
3479 ret = -EINVAL;
3483 if (core->num_parents > 1 && !core->ops->get_parent) {
3485 __func__, core->name);
3486 ret = -EINVAL;
3490 if (core->ops->set_rate_and_parent &&
3491 !(core->ops->set_parent && core->ops->set_rate)) {
3493 __func__, core->name);
3494 ret = -EINVAL;
3499 * optional platform-specific magic
3512 if (core->ops->init) {
3513 ret = core->ops->init(core->hw);
3518 parent = core->parent = __clk_init_parent(core);
3521 * Populate core->parent if parent has already been clk_core_init'd. If
3527 * clocks and re-parent any that are children of the clock currently
3531 hlist_add_head(&core->child_node, &parent->children);
3532 core->orphan = parent->orphan;
3533 } else if (!core->num_parents) {
3534 hlist_add_head(&core->child_node, &clk_root_list);
3535 core->orphan = false;
3537 hlist_add_head(&core->child_node, &clk_orphan_list);
3538 core->orphan = true;
3543 * .recalc_accuracy. For simple clocks and lazy developers the default
3548 if (core->ops->recalc_accuracy)
3549 core->accuracy = core->ops->recalc_accuracy(core->hw,
3552 core->accuracy = parent->accuracy;
3554 core->accuracy = 0;
3565 core->name);
3576 * simple clocks and lazy developers the default fallback is to use the
3580 if (core->ops->recalc_rate)
3581 rate = core->ops->recalc_rate(core->hw,
3584 rate = parent->rate;
3587 core->rate = core->req_rate = rate;
3590 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
3592 * reparenting clocks
3594 if (core->flags & CLK_IS_CRITICAL) {
3600 __func__, core->name);
3609 __func__, core->name);
3618 kref_init(&core->ref);
3623 hlist_del_init(&core->child_node);
3624 core->hw->core = NULL;
3636 * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
3643 hlist_add_head(&clk->clks_node, &core->clks);
3648 * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
3654 hlist_del(&clk->clks_node);
3658 * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core
3672 return ERR_PTR(-ENOMEM);
3674 clk->core = core;
3675 clk->dev_id = dev_id;
3676 clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
3677 clk->max_rate = ULONG_MAX;
3683 * free_clk - Free a clk consumer
3691 kfree_const(clk->con_id);
3717 core = hw->core;
3721 clk->dev = dev;
3723 if (!try_module_get(core->owner)) {
3725 return ERR_PTR(-ENOENT);
3728 kref_get(&core->ref);
3740 return -EINVAL;
3746 return -ENOMEM;
3754 u8 num_parents = init->num_parents;
3755 const char * const *parent_names = init->parent_names;
3756 const struct clk_hw **parent_hws = init->parent_hws;
3757 const struct clk_parent_data *parent_data = init->parent_data;
3765 * Avoid unnecessary string look-ups of clk_core's possible parents by
3769 core->parents = parents;
3771 return -ENOMEM;
3775 parent->index = -1;
3780 __func__, core->name);
3781 ret = clk_cpy_name(&parent->name, parent_names[i],
3784 parent->hw = parent_data[i].hw;
3785 parent->index = parent_data[i].index;
3786 ret = clk_cpy_name(&parent->fw_name,
3789 ret = clk_cpy_name(&parent->name,
3793 parent->hw = parent_hws[i];
3795 ret = -EINVAL;
3803 } while (--i >= 0);
3815 int i = core->num_parents;
3817 if (!core->num_parents)
3820 while (--i >= 0) {
3821 kfree_const(core->parents[i].name);
3822 kfree_const(core->parents[i].fw_name);
3825 kfree(core->parents);
3833 const struct clk_init_data *init = hw->init;
3838 * we catch use of hw->init early on in the core.
3840 hw->init = NULL;
3844 ret = -ENOMEM;
3848 core->name = kstrdup_const(init->name, GFP_KERNEL);
3849 if (!core->name) {
3850 ret = -ENOMEM;
3854 if (WARN_ON(!init->ops)) {
3855 ret = -EINVAL;
3858 core->ops = init->ops;
3861 core->rpm_enabled = true;
3862 core->dev = dev;
3863 core->of_node = np;
3864 if (dev && dev->driver)
3865 core->owner = dev->driver->owner;
3866 core->hw = hw;
3867 core->flags = init->flags;
3868 core->num_parents = init->num_parents;
3869 core->min_rate = 0;
3870 core->max_rate = ULONG_MAX;
3876 INIT_HLIST_HEAD(&core->clks);
3882 hw->clk = alloc_clk(core, NULL, NULL);
3883 if (IS_ERR(hw->clk)) {
3884 ret = PTR_ERR(hw->clk);
3888 clk_core_link_consumer(core, hw->clk);
3892 return hw->clk;
3895 clk_core_unlink_consumer(hw->clk);
3898 free_clk(hw->clk);
3899 hw->clk = NULL;
3905 kfree_const(core->name);
3913 * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
3917 * @dev->parent if dev doesn't have a device node, or NULL if neither
3918 * @dev or @dev->parent have a device node.
3929 np = dev_of_node(dev->parent);
3935 * clk_register - allocate a new clock, register it and return an opaque cookie
3937 * @hw: link to hardware-specific clock data
3954 * clk_hw_register - register a clk_hw and return an error code
3956 * @hw: link to hardware-specific clock data
3971 * of_clk_hw_register - register a clk_hw and return an error code
3973 * @hw: link to hardware-specific clock data
3995 kfree_const(core->name);
4000 * Empty clk_ops for unregistered clocks. These are used temporarily
4006 return -ENXIO;
4017 return -ENXIO;
4022 return -ENXIO;
4040 for (i = 0; i < root->num_parents; i++)
4041 if (root->parents[i].core == target)
4042 root->parents[i].core = NULL;
4044 hlist_for_each_entry(child, &root->children, child_node)
4063 * clk_unregister - unregister a currently registered clock
4074 clk_debug_unregister(clk->core);
4078 ops = clk->core->ops;
4081 clk->core->name);
4089 clk->core->ops = &clk_nodrv_ops;
4092 if (ops->terminate)
4093 ops->terminate(clk->core->hw);
4095 if (!hlist_empty(&clk->core->children)) {
4100 hlist_for_each_entry_safe(child, t, &clk->core->children,
4105 clk_core_evict_parent_cache(clk->core);
4107 hlist_del_init(&clk->core->child_node);
4109 if (clk->core->prepare_count)
4111 __func__, clk->core->name);
4113 if (clk->core->protect_count)
4114 pr_warn("%s: unregistering protected clock: %s\n",
4115 __func__, clk->core->name);
4117 kref_put(&clk->core->ref, __clk_release);
4125 * clk_hw_unregister - unregister a currently registered clk_hw
4126 * @hw: hardware-specific clock data to unregister
4130 clk_unregister(hw->clk);
4145 * devm_clk_register - resource managed clk_register()
4147 * @hw: link to hardware-specific clock data
4151 * Clocks returned from this function are automatically clk_unregister()ed on
4161 return ERR_PTR(-ENOMEM);
4176 * devm_clk_hw_register - resource managed clk_hw_register()
4178 * @hw: link to hardware-specific clock data
4180 * Managed clk_hw_register(). Clocks registered by this function are
4191 return -ENOMEM;
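A provider-side sketch of devm_clk_hw_register() (hypothetical driver; register layout, names and bit index are illustrative): a hand-rolled clk_gate whose registration is undone automatically when the device goes away.

#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>

static int my_gate_probe(struct platform_device *pdev)
{
	static const struct clk_parent_data pdata = { .fw_name = "bus", .index = 0 };
	static const struct clk_init_data init = {
		.name		= "my_gate",
		.ops		= &clk_gate_ops,
		.parent_data	= &pdata,
		.num_parents	= 1,
	};
	struct clk_gate *gate;
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	gate = devm_kzalloc(&pdev->dev, sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return -ENOMEM;

	gate->reg = base;		/* gate register, illustrative */
	gate->bit_idx = 0;		/* enable bit, illustrative */
	gate->hw.init = &init;

	return devm_clk_hw_register(&pdev->dev, &gate->hw);
}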
4223 * devm_clk_unregister - resource managed clk_unregister()
4238 * devm_clk_hw_unregister - resource managed clk_hw_unregister()
4239 * @dev: device that is unregistering the hardware-specific clock data
4240 * @hw: link to hardware-specific clock data
4271 if (WARN_ON(clk->exclusive_count)) {
4273 clk->core->protect_count -= (clk->exclusive_count - 1);
4274 clk_core_rate_unprotect(clk->core);
4275 clk->exclusive_count = 0;
4278 hlist_del(&clk->clks_node);
4279 if (clk->min_rate > clk->core->req_rate ||
4280 clk->max_rate < clk->core->req_rate)
4281 clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
4283 owner = clk->core->owner;
4284 kref_put(&clk->core->ref, __clk_release);
4296 * clk_notifier_register - add a clk rate change notifier
4303 * re-enter into the clk framework by calling any top-level clk APIs;
4310 * clk_notifier_register() must be called from non-atomic context.
4311 * Returns -EINVAL if called with null arguments, -ENOMEM upon
4318 int ret = -ENOMEM;
4321 return -EINVAL;
4327 if (cn->clk == clk)
4335 cn->clk = clk;
4336 srcu_init_notifier_head(&cn->notifier_head);
4338 list_add(&cn->node, &clk_notifier_list);
4341 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
4343 clk->core->notifier_count++;
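A consumer-side sketch of a rate-change notifier (hypothetical callback and threshold): the callback can veto a change at PRE_RATE_CHANGE and react to the final rate at POST_RATE_CHANGE; clk_notifier_register() hooks it up to one struct clk.

#include <linux/clk.h>
#include <linux/notifier.h>

static int my_rate_notify(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		if (ndata->new_rate > 200000000)	/* illustrative limit */
			return NOTIFY_BAD;		/* abort the rate change */
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
		/* reprogram dividers/baud rates for ndata->new_rate */
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block my_rate_nb = {
	.notifier_call = my_rate_notify,
};

/* from probe(), with clk already acquired: clk_notifier_register(clk, &my_rate_nb); */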
4353 * clk_notifier_unregister - remove a clk rate change notifier
4360 * Returns -EINVAL if called with null arguments; otherwise, passes
4366 int ret = -ENOENT;
4369 return -EINVAL;
4374 if (cn->clk == clk) {
4375 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
4377 clk->core->notifier_count--;
4380 if (!cn->notifier_head.head) {
4381 srcu_cleanup_notifier_head(&cn->notifier_head);
4382 list_del(&cn->node);
4404 * struct of_clk_provider - Clock provider registration structure
4445 unsigned int idx = clkspec->args[0];
4447 if (idx >= clk_data->clk_num) {
4449 return ERR_PTR(-EINVAL);
4452 return clk_data->clks[idx];
4460 unsigned int idx = clkspec->args[0];
4462 if (idx >= hw_data->num) {
4464 return ERR_PTR(-EINVAL);
4467 return hw_data->hws[idx];
4472 * of_clk_add_provider() - Register a clock provider for a node
4489 return -ENOMEM;
4491 cp->node = of_node_get(np);
4492 cp->data = data;
4493 cp->get = clk_src_get;
4496 list_add(&cp->link, &of_clk_providers);
4511 * of_clk_add_hw_provider() - Register a clock provider for a node
4526 return -ENOMEM;
4528 cp->node = of_node_get(np);
4529 cp->data = data;
4530 cp->get_hw = get;
4533 list_add(&cp->link, &of_clk_providers);
4554 * for cases like MFD sub-devices where the child device driver wants to use
4555 * devm_*() APIs but not list the device in DT as a sub-node.
4561 np = dev->of_node;
4562 parent_np = dev->parent ? dev->parent->of_node : NULL;
4564 if (!of_find_property(np, "#clock-cells", NULL))
4565 if (of_find_property(parent_np, "#clock-cells", NULL))
4572 * devm_of_clk_add_hw_provider() - Managed clk provider node registration
4578 * node or if the device node lacks of clock provider information (#clock-cells)
4580 * has the #clock-cells then it is used in registration. Provider is
4596 return -ENOMEM;
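A provider-side sketch of devm_of_clk_add_hw_provider() (hypothetical driver; MY_NR_CLKS and my_register_one() are illustrative stand-ins): the stock of_clk_hw_onecell_get() callback translates a one-cell DT specifier into an entry of clk_hw_onecell_data.

#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/overflow.h>
#include <linux/err.h>

#define MY_NR_CLKS	4	/* hypothetical number of exported clocks */

/* hypothetical helper that registers one clk_hw and returns it */
static struct clk_hw *my_register_one(struct platform_device *pdev, int idx);

static int my_provider_probe(struct platform_device *pdev)
{
	struct clk_hw_onecell_data *data;
	int i;

	data = devm_kzalloc(&pdev->dev, struct_size(data, hws, MY_NR_CLKS),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->num = MY_NR_CLKS;

	for (i = 0; i < MY_NR_CLKS; i++) {
		data->hws[i] = my_register_one(pdev, i);
		if (IS_ERR(data->hws[i]))
			return PTR_ERR(data->hws[i]);
	}

	/* consumers reference these as: clocks = <&my_provider N>; */
	return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
					   data);
}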
4612 * of_clk_del_provider() - Remove a previously registered clock provider
4621 if (cp->node == np) {
4622 list_del(&cp->link);
4623 of_node_put(cp->node);
4643 * devm_of_clk_del_provider() - Remove clock provider registered using devm
4659 * of_parse_clkspec() - Parse a DT clock specifier for a given device node
4665 * Parses a device node's "clocks" and "clock-names" properties to find the
4668 * parsing error. The @index argument is ignored if @name is non-NULL.
4672 * phandle1: clock-controller@1 {
4673 * #clock-cells = <2>;
4676 * phandle2: clock-controller@2 {
4677 * #clock-cells = <1>;
4680 * clock-consumer@3 {
4681 * clocks = <&phandle1 1 2 &phandle2 3>;
4682 * clock-names = "name1", "name2";
4685 * To get a device_node for `clock-controller@2' node you may call this
4688 * of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
4689 * of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
4690 * of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
4692 * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT
4693 * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in
4694 * the "clock-names" property of @np.
4699 int ret = -ENOENT;
4704 * For named clocks, first look up the name in the
4705 * "clock-names" property. If it cannot be found, then index
4707 * return -EINVAL.
4710 index = of_property_match_string(np, "clock-names", name);
4711 ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
4720 * has a "clock-ranges" property, then we can try one of its
4721 * clocks.
4723 np = np->parent;
4724 if (np && !of_get_property(np, "clock-ranges", NULL))
4738 if (provider->get_hw)
4739 return provider->get_hw(clkspec, provider->data);
4741 clk = provider->get(clkspec, provider->data);
4751 struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
4754 return ERR_PTR(-EINVAL);
4758 if (provider->node == clkspec->np) {
4770 * of_clk_get_from_provider() - Lookup a clock from a clock provider
4813 return __of_clk_get(np, index, np->full_name, NULL);
4818 * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
4822 * This function parses the clocks and clock-names properties,
4829 return ERR_PTR(-ENOENT);
4831 return __of_clk_get(np, 0, np->full_name, name);
4836 * of_clk_get_parent_count() - Count the number of clocks a device node has
4839 * Returns: The number of clocks that are possible parents of this node
4845 count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
4864 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
4873 * specified into an array offset for the clock-output-names property.
4875 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
4882 /* We went off the end of 'clock-indices' without finding it */
4886 if (of_property_read_string_index(clkspec.np, "clock-output-names",
4893 * the clock as long as #clock-cells = 0.
4898 clk_name = clkspec.np->name;
4914 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
4960 if (PTR_ERR(clk) == -EPROBE_DEFER)
4976 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
4979 * @flags: pointer to top-level framework flags
4981 * Detects if the clock-critical property exists and, if so, sets the
4985 * bindings, such as the one-clock-per-node style that are outdated.
5001 return -EINVAL;
5003 of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
5011 * of_clk_init() - Scan and init clock providers from the DT
5030 /* First prepare the list of the clocks providers */
5041 list_del(&clk_provider->node);
5042 of_node_put(clk_provider->np);
5049 parent->clk_init_cb = match->data;
5050 parent->np = of_node_get(np);
5051 list_add_tail(&parent->node, &clk_provider_list);
5058 if (force || parent_ready(clk_provider->np)) {
5061 of_node_set_flag(clk_provider->np,
5064 clk_provider->clk_init_cb(clk_provider->np);
5065 of_clk_set_defaults(clk_provider->np, true);
5067 list_del(&clk_provider->node);
5068 of_node_put(clk_provider->np);