Lines matching full:core — full-text search hits for "core" in the Linux common clock framework core, drivers/clk/clk.c; each hit gives the kernel source line number, the matching line, and the enclosing function.
50 struct clk_core *core; member
98 struct clk_core *core; member
109 static int clk_pm_runtime_get(struct clk_core *core) in clk_pm_runtime_get() argument
113 if (!core->rpm_enabled) in clk_pm_runtime_get()
116 ret = pm_runtime_get_sync(core->dev); in clk_pm_runtime_get()
118 pm_runtime_put_noidle(core->dev); in clk_pm_runtime_get()
124 static void clk_pm_runtime_put(struct clk_core *core) in clk_pm_runtime_put() argument
126 if (!core->rpm_enabled) in clk_pm_runtime_put()
129 pm_runtime_put_sync(core->dev); in clk_pm_runtime_put()
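The two helpers above gate hardware access behind runtime PM, but only when the clock's provider device opted in (rpm_enabled). A minimal sketch of that guard pattern, assuming a generic provider-private struct with the same two fields (struct and function names are illustrative, not from this file):

#include <linux/pm_runtime.h>

struct my_clk_priv {
	struct device *dev;	/* provider device, may be absent */
	bool rpm_enabled;	/* set only when runtime PM is usable */
};

static int my_clk_rpm_get(struct my_clk_priv *priv)
{
	int ret;

	if (!priv->rpm_enabled)
		return 0;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		/* rebalance the usage count without triggering idle */
		pm_runtime_put_noidle(priv->dev);
		return ret;
	}

	return 0;
}

static void my_clk_rpm_put(struct my_clk_priv *priv)
{
	if (priv->rpm_enabled)
		pm_runtime_put_sync(priv->dev);
}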
201 static bool clk_core_rate_is_protected(struct clk_core *core) in clk_core_rate_is_protected() argument
203 return core->protect_count; in clk_core_rate_is_protected()
206 static bool clk_core_is_prepared(struct clk_core *core) in clk_core_is_prepared() argument
214 if (!core->ops->is_prepared) in clk_core_is_prepared()
215 return core->prepare_count; in clk_core_is_prepared()
217 if (!clk_pm_runtime_get(core)) { in clk_core_is_prepared()
218 ret = core->ops->is_prepared(core->hw); in clk_core_is_prepared()
219 clk_pm_runtime_put(core); in clk_core_is_prepared()
225 static bool clk_core_is_enabled(struct clk_core *core) in clk_core_is_enabled() argument
233 if (!core->ops->is_enabled) in clk_core_is_enabled()
234 return core->enable_count; in clk_core_is_enabled()
246 if (core->rpm_enabled) { in clk_core_is_enabled()
247 pm_runtime_get_noresume(core->dev); in clk_core_is_enabled()
248 if (!pm_runtime_active(core->dev)) { in clk_core_is_enabled()
259 if ((core->flags & CLK_OPS_PARENT_ENABLE) && core->parent) in clk_core_is_enabled()
260 if (!clk_core_is_enabled(core->parent)) { in clk_core_is_enabled()
265 ret = core->ops->is_enabled(core->hw); in clk_core_is_enabled()
267 if (core->rpm_enabled) in clk_core_is_enabled()
268 pm_runtime_put(core->dev); in clk_core_is_enabled()
277 return !clk ? NULL : clk->core->name; in __clk_get_name()
283 return hw->core->name; in clk_hw_get_name()
289 return !clk ? NULL : clk->core->hw; in __clk_get_hw()
295 return hw->core->num_parents; in clk_hw_get_num_parents()
301 return hw->core->parent ? hw->core->parent->hw : NULL; in clk_hw_get_parent()
306 struct clk_core *core) in __clk_lookup_subtree() argument
311 if (!strcmp(core->name, name)) in __clk_lookup_subtree()
312 return core; in __clk_lookup_subtree()
314 hlist_for_each_entry(child, &core->children, child_node) { in __clk_lookup_subtree()
369 * @core: clk to find parent of
403 static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index) in clk_core_get() argument
405 const char *name = core->parents[p_index].fw_name; in clk_core_get()
406 int index = core->parents[p_index].index; in clk_core_get()
408 struct device *dev = core->dev; in clk_core_get()
410 struct device_node *np = core->of_node; in clk_core_get()
428 return hw->core; in clk_core_get()
431 static void clk_core_fill_parent_index(struct clk_core *core, u8 index) in clk_core_fill_parent_index() argument
433 struct clk_parent_map *entry = &core->parents[index]; in clk_core_fill_parent_index()
437 parent = entry->hw->core; in clk_core_fill_parent_index()
446 parent = clk_core_get(core, index); in clk_core_fill_parent_index()
453 entry->core = parent; in clk_core_fill_parent_index()
456 static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core, in clk_core_get_parent_by_index() argument
459 if (!core || index >= core->num_parents || !core->parents) in clk_core_get_parent_by_index()
462 if (!core->parents[index].core) in clk_core_get_parent_by_index()
463 clk_core_fill_parent_index(core, index); in clk_core_get_parent_by_index()
465 return core->parents[index].core; in clk_core_get_parent_by_index()
473 parent = clk_core_get_parent_by_index(hw->core, index); in clk_hw_get_parent_by_index()
481 return !clk ? 0 : clk->core->enable_count; in __clk_get_enable_count()
484 static unsigned long clk_core_get_rate_nolock(struct clk_core *core) in clk_core_get_rate_nolock() argument
486 if (!core) in clk_core_get_rate_nolock()
489 if (!core->num_parents || core->parent) in clk_core_get_rate_nolock()
490 return core->rate; in clk_core_get_rate_nolock()
502 return clk_core_get_rate_nolock(hw->core); in clk_hw_get_rate()
506 static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core) in clk_core_get_accuracy_no_lock() argument
508 if (!core) in clk_core_get_accuracy_no_lock()
511 return core->accuracy; in clk_core_get_accuracy_no_lock()
516 return hw->core->flags; in clk_hw_get_flags()
522 return clk_core_is_prepared(hw->core); in clk_hw_is_prepared()
528 return clk_core_rate_is_protected(hw->core); in clk_hw_rate_is_protected()
534 return clk_core_is_enabled(hw->core); in clk_hw_is_enabled()
543 return clk_core_is_enabled(clk->core); in __clk_is_enabled()
560 struct clk_core *core = hw->core, *parent, *best_parent = NULL; in clk_mux_determine_rate_flags() local
566 if (core->flags & CLK_SET_RATE_NO_REPARENT) { in clk_mux_determine_rate_flags()
567 parent = core->parent; in clk_mux_determine_rate_flags()
568 if (core->flags & CLK_SET_RATE_PARENT) { in clk_mux_determine_rate_flags()
578 best = clk_core_get_rate_nolock(core); in clk_mux_determine_rate_flags()
585 num_parents = core->num_parents; in clk_mux_determine_rate_flags()
587 parent = clk_core_get_parent_by_index(core, i); in clk_mux_determine_rate_flags()
591 if (core->flags & CLK_SET_RATE_PARENT) { in clk_mux_determine_rate_flags()
622 struct clk_core *core = clk_core_lookup(name); in __clk_lookup() local
624 return !core ? NULL : core->hw->clk; in __clk_lookup()
627 static void clk_core_get_boundaries(struct clk_core *core, in clk_core_get_boundaries() argument
635 *min_rate = core->min_rate; in clk_core_get_boundaries()
636 *max_rate = core->max_rate; in clk_core_get_boundaries()
638 hlist_for_each_entry(clk_user, &core->clks, clks_node) in clk_core_get_boundaries()
641 hlist_for_each_entry(clk_user, &core->clks, clks_node) in clk_core_get_boundaries()
645 static bool clk_core_check_boundaries(struct clk_core *core, in clk_core_check_boundaries() argument
653 if (min_rate > core->max_rate || max_rate < core->min_rate) in clk_core_check_boundaries()
656 hlist_for_each_entry(user, &core->clks, clks_node) in clk_core_check_boundaries()
666 hw->core->min_rate = min_rate; in clk_hw_set_rate_range()
667 hw->core->max_rate = max_rate; in clk_hw_set_rate_range()
698 static void clk_core_rate_unprotect(struct clk_core *core) in clk_core_rate_unprotect() argument
702 if (!core) in clk_core_rate_unprotect()
705 if (WARN(core->protect_count == 0, in clk_core_rate_unprotect()
706 "%s already unprotected\n", core->name)) in clk_core_rate_unprotect()
709 if (--core->protect_count > 0) in clk_core_rate_unprotect()
712 clk_core_rate_unprotect(core->parent); in clk_core_rate_unprotect()
715 static int clk_core_rate_nuke_protect(struct clk_core *core) in clk_core_rate_nuke_protect() argument
721 if (!core) in clk_core_rate_nuke_protect()
724 if (core->protect_count == 0) in clk_core_rate_nuke_protect()
727 ret = core->protect_count; in clk_core_rate_nuke_protect()
728 core->protect_count = 1; in clk_core_rate_nuke_protect()
729 clk_core_rate_unprotect(core); in clk_core_rate_nuke_protect()
766 clk_core_rate_unprotect(clk->core); in clk_rate_exclusive_put()
773 static void clk_core_rate_protect(struct clk_core *core) in clk_core_rate_protect() argument
777 if (!core) in clk_core_rate_protect()
780 if (core->protect_count == 0) in clk_core_rate_protect()
781 clk_core_rate_protect(core->parent); in clk_core_rate_protect()
783 core->protect_count++; in clk_core_rate_protect()
786 static void clk_core_rate_restore_protect(struct clk_core *core, int count) in clk_core_rate_restore_protect() argument
790 if (!core) in clk_core_rate_restore_protect()
796 clk_core_rate_protect(core); in clk_core_rate_restore_protect()
797 core->protect_count = count; in clk_core_rate_restore_protect()
824 clk_core_rate_protect(clk->core); in clk_rate_exclusive_get()
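The protect_count handling above, propagated up the parent chain, backs the consumer-facing exclusive-rate API: clk_rate_exclusive_get() protects, clk_rate_exclusive_put() unprotects. A rough consumer-side sketch (function name and rate invented; only the clk API calls are real):

#include <linux/clk.h>

/* Pin the rate so no other consumer can re-rate or reparent this clock. */
static int my_pin_rate(struct clk *clk)
{
	int ret;

	ret = clk_rate_exclusive_get(clk);	/* bumps protect_count up the chain */
	if (ret)
		return ret;

	ret = clk_set_rate(clk, 104000000);	/* still allowed for the exclusive holder */
	if (ret)
		clk_rate_exclusive_put(clk);	/* drop protection again on failure */

	return ret;
}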
832 static void clk_core_unprepare(struct clk_core *core) in clk_core_unprepare() argument
836 if (!core) in clk_core_unprepare()
839 if (WARN(core->prepare_count == 0, in clk_core_unprepare()
840 "%s already unprepared\n", core->name)) in clk_core_unprepare()
843 if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL, in clk_core_unprepare()
844 "Unpreparing critical %s\n", core->name)) in clk_core_unprepare()
847 if (core->flags & CLK_SET_RATE_GATE) in clk_core_unprepare()
848 clk_core_rate_unprotect(core); in clk_core_unprepare()
850 if (--core->prepare_count > 0) in clk_core_unprepare()
853 WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name); in clk_core_unprepare()
855 trace_clk_unprepare(core); in clk_core_unprepare()
857 if (core->ops->unprepare) in clk_core_unprepare()
858 core->ops->unprepare(core->hw); in clk_core_unprepare()
860 trace_clk_unprepare_complete(core); in clk_core_unprepare()
861 clk_core_unprepare(core->parent); in clk_core_unprepare()
862 clk_pm_runtime_put(core); in clk_core_unprepare()
865 static void clk_core_unprepare_lock(struct clk_core *core) in clk_core_unprepare_lock() argument
868 clk_core_unprepare(core); in clk_core_unprepare_lock()
888 clk_core_unprepare_lock(clk->core); in clk_unprepare()
892 static int clk_core_prepare(struct clk_core *core) in clk_core_prepare() argument
898 if (!core) in clk_core_prepare()
901 if (core->prepare_count == 0) { in clk_core_prepare()
902 ret = clk_pm_runtime_get(core); in clk_core_prepare()
906 ret = clk_core_prepare(core->parent); in clk_core_prepare()
910 trace_clk_prepare(core); in clk_core_prepare()
912 if (core->ops->prepare) in clk_core_prepare()
913 ret = core->ops->prepare(core->hw); in clk_core_prepare()
915 trace_clk_prepare_complete(core); in clk_core_prepare()
921 core->prepare_count++; in clk_core_prepare()
930 if (core->flags & CLK_SET_RATE_GATE) in clk_core_prepare()
931 clk_core_rate_protect(core); in clk_core_prepare()
935 clk_core_unprepare(core->parent); in clk_core_prepare()
937 clk_pm_runtime_put(core); in clk_core_prepare()
941 static int clk_core_prepare_lock(struct clk_core *core) in clk_core_prepare_lock() argument
946 ret = clk_core_prepare(core); in clk_core_prepare_lock()
969 return clk_core_prepare_lock(clk->core); in clk_prepare()
973 static void clk_core_disable(struct clk_core *core) in clk_core_disable() argument
977 if (!core) in clk_core_disable()
980 if (WARN(core->enable_count == 0, "%s already disabled\n", core->name)) in clk_core_disable()
983 if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL, in clk_core_disable()
984 "Disabling critical %s\n", core->name)) in clk_core_disable()
987 if (--core->enable_count > 0) in clk_core_disable()
990 trace_clk_disable_rcuidle(core); in clk_core_disable()
992 if (core->ops->disable) in clk_core_disable()
993 core->ops->disable(core->hw); in clk_core_disable()
995 trace_clk_disable_complete_rcuidle(core); in clk_core_disable()
997 clk_core_disable(core->parent); in clk_core_disable()
1000 static void clk_core_disable_lock(struct clk_core *core) in clk_core_disable_lock() argument
1005 clk_core_disable(core); in clk_core_disable_lock()
1026 clk_core_disable_lock(clk->core); in clk_disable()
1030 static int clk_core_enable(struct clk_core *core) in clk_core_enable() argument
1036 if (!core) in clk_core_enable()
1039 if (WARN(core->prepare_count == 0, in clk_core_enable()
1040 "Enabling unprepared %s\n", core->name)) in clk_core_enable()
1043 if (core->enable_count == 0) { in clk_core_enable()
1044 ret = clk_core_enable(core->parent); in clk_core_enable()
1049 trace_clk_enable_rcuidle(core); in clk_core_enable()
1051 if (core->ops->enable) in clk_core_enable()
1052 ret = core->ops->enable(core->hw); in clk_core_enable()
1054 trace_clk_enable_complete_rcuidle(core); in clk_core_enable()
1057 clk_core_disable(core->parent); in clk_core_enable()
1062 core->enable_count++; in clk_core_enable()
1066 static int clk_core_enable_lock(struct clk_core *core) in clk_core_enable_lock() argument
1072 ret = clk_core_enable(core); in clk_core_enable_lock()
1090 struct clk_core *core = hw->core; in clk_gate_restore_context() local
1092 if (core->enable_count) in clk_gate_restore_context()
1093 core->ops->enable(hw); in clk_gate_restore_context()
1095 core->ops->disable(hw); in clk_gate_restore_context()
1099 static int clk_core_save_context(struct clk_core *core) in clk_core_save_context() argument
1104 hlist_for_each_entry(child, &core->children, child_node) { in clk_core_save_context()
1110 if (core->ops && core->ops->save_context) in clk_core_save_context()
1111 ret = core->ops->save_context(core->hw); in clk_core_save_context()
1116 static void clk_core_restore_context(struct clk_core *core) in clk_core_restore_context() argument
1120 if (core->ops && core->ops->restore_context) in clk_core_restore_context()
1121 core->ops->restore_context(core->hw); in clk_core_restore_context()
1123 hlist_for_each_entry(child, &core->children, child_node) in clk_core_restore_context()
1163 struct clk_core *core; in clk_restore_context() local
1165 hlist_for_each_entry(core, &clk_root_list, child_node) in clk_restore_context()
1166 clk_core_restore_context(core); in clk_restore_context()
1168 hlist_for_each_entry(core, &clk_orphan_list, child_node) in clk_restore_context()
1169 clk_core_restore_context(core); in clk_restore_context()
1191 return clk_core_enable_lock(clk->core); in clk_enable()
1195 static int clk_core_prepare_enable(struct clk_core *core) in clk_core_prepare_enable() argument
1199 ret = clk_core_prepare_lock(core); in clk_core_prepare_enable()
1203 ret = clk_core_enable_lock(core); in clk_core_prepare_enable()
1205 clk_core_unprepare_lock(core); in clk_core_prepare_enable()
1210 static void clk_core_disable_unprepare(struct clk_core *core) in clk_core_disable_unprepare() argument
1212 clk_core_disable_lock(core); in clk_core_disable_unprepare()
1213 clk_core_unprepare_lock(core); in clk_core_disable_unprepare()
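The prepare/unprepare and enable/disable walks above are what the standard consumer calls expand to: clk_prepare() may sleep and recurses to the parent, clk_enable() must not sleep, and clk_prepare_enable()/clk_disable_unprepare() chain the two, mirroring clk_core_prepare_enable()/clk_core_disable_unprepare(). A hedged consumer-side sketch (device setup and con_id are made up):

#include <linux/clk.h>
#include <linux/device.h>

static int my_probe_clocks(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "bus");		/* "bus" is an invented con_id */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);		/* clk_core_prepare() + clk_core_enable() */
	if (ret)
		return ret;

	/* ... program the hardware ... */

	clk_disable_unprepare(clk);		/* clk_core_disable() + clk_core_unprepare() */
	return 0;
}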
1216 static void __init clk_unprepare_unused_subtree(struct clk_core *core) in clk_unprepare_unused_subtree() argument
1222 hlist_for_each_entry(child, &core->children, child_node) in clk_unprepare_unused_subtree()
1225 if (core->prepare_count) in clk_unprepare_unused_subtree()
1228 if (core->flags & CLK_IGNORE_UNUSED) in clk_unprepare_unused_subtree()
1231 if (clk_pm_runtime_get(core)) in clk_unprepare_unused_subtree()
1234 if (clk_core_is_prepared(core)) { in clk_unprepare_unused_subtree()
1235 trace_clk_unprepare(core); in clk_unprepare_unused_subtree()
1236 if (core->ops->unprepare_unused) in clk_unprepare_unused_subtree()
1237 core->ops->unprepare_unused(core->hw); in clk_unprepare_unused_subtree()
1238 else if (core->ops->unprepare) in clk_unprepare_unused_subtree()
1239 core->ops->unprepare(core->hw); in clk_unprepare_unused_subtree()
1240 trace_clk_unprepare_complete(core); in clk_unprepare_unused_subtree()
1243 clk_pm_runtime_put(core); in clk_unprepare_unused_subtree()
1246 static void __init clk_disable_unused_subtree(struct clk_core *core) in clk_disable_unused_subtree() argument
1253 hlist_for_each_entry(child, &core->children, child_node) in clk_disable_unused_subtree()
1256 if (core->flags & CLK_OPS_PARENT_ENABLE) in clk_disable_unused_subtree()
1257 clk_core_prepare_enable(core->parent); in clk_disable_unused_subtree()
1259 if (clk_pm_runtime_get(core)) in clk_disable_unused_subtree()
1264 if (core->enable_count) in clk_disable_unused_subtree()
1267 if (core->flags & CLK_IGNORE_UNUSED) in clk_disable_unused_subtree()
1275 if (clk_core_is_enabled(core)) { in clk_disable_unused_subtree()
1276 trace_clk_disable(core); in clk_disable_unused_subtree()
1277 if (core->ops->disable_unused) in clk_disable_unused_subtree()
1278 core->ops->disable_unused(core->hw); in clk_disable_unused_subtree()
1279 else if (core->ops->disable) in clk_disable_unused_subtree()
1280 core->ops->disable(core->hw); in clk_disable_unused_subtree()
1281 trace_clk_disable_complete(core); in clk_disable_unused_subtree()
1286 clk_pm_runtime_put(core); in clk_disable_unused_subtree()
1288 if (core->flags & CLK_OPS_PARENT_ENABLE) in clk_disable_unused_subtree()
1289 clk_core_disable_unprepare(core->parent); in clk_disable_unused_subtree()
1302 struct clk_core *core; in clk_disable_unused() local
1311 hlist_for_each_entry(core, &clk_root_list, child_node) in clk_disable_unused()
1312 clk_disable_unused_subtree(core); in clk_disable_unused()
1314 hlist_for_each_entry(core, &clk_orphan_list, child_node) in clk_disable_unused()
1315 clk_disable_unused_subtree(core); in clk_disable_unused()
1317 hlist_for_each_entry(core, &clk_root_list, child_node) in clk_disable_unused()
1318 clk_unprepare_unused_subtree(core); in clk_disable_unused()
1320 hlist_for_each_entry(core, &clk_orphan_list, child_node) in clk_disable_unused()
1321 clk_unprepare_unused_subtree(core); in clk_disable_unused()
1329 static int clk_core_determine_round_nolock(struct clk_core *core, in clk_core_determine_round_nolock() argument
1336 if (!core) in clk_core_determine_round_nolock()
1340 * At this point, core protection will be disabled if in clk_core_determine_round_nolock()
1345 if (clk_core_rate_is_protected(core)) { in clk_core_determine_round_nolock()
1346 req->rate = core->rate; in clk_core_determine_round_nolock()
1347 } else if (core->ops->determine_rate) { in clk_core_determine_round_nolock()
1348 return core->ops->determine_rate(core->hw, req); in clk_core_determine_round_nolock()
1349 } else if (core->ops->round_rate) { in clk_core_determine_round_nolock()
1350 rate = core->ops->round_rate(core->hw, req->rate, in clk_core_determine_round_nolock()
1363 static void clk_core_init_rate_req(struct clk_core * const core, in clk_core_init_rate_req() argument
1368 if (WARN_ON(!core || !req)) in clk_core_init_rate_req()
1371 parent = core->parent; in clk_core_init_rate_req()
1381 static bool clk_core_can_round(struct clk_core * const core) in clk_core_can_round() argument
1383 return core->ops->determine_rate || core->ops->round_rate; in clk_core_can_round()
1386 static int clk_core_round_rate_nolock(struct clk_core *core, in clk_core_round_rate_nolock() argument
1391 if (!core) { in clk_core_round_rate_nolock()
1396 clk_core_init_rate_req(core, req); in clk_core_round_rate_nolock()
1398 if (clk_core_can_round(core)) in clk_core_round_rate_nolock()
1399 return clk_core_determine_round_nolock(core, req); in clk_core_round_rate_nolock()
1400 else if (core->flags & CLK_SET_RATE_PARENT) in clk_core_round_rate_nolock()
1401 return clk_core_round_rate_nolock(core->parent, req); in clk_core_round_rate_nolock()
1403 req->rate = core->rate; in clk_core_round_rate_nolock()
1421 return clk_core_round_rate_nolock(hw->core, req); in __clk_determine_rate()
1445 clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate); in clk_hw_round_rate()
1448 ret = clk_core_round_rate_nolock(hw->core, &req); in clk_hw_round_rate()
1476 clk_core_rate_unprotect(clk->core); in clk_round_rate()
1478 clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate); in clk_round_rate()
1481 ret = clk_core_round_rate_nolock(clk->core, &req); in clk_round_rate()
1484 clk_core_rate_protect(clk->core); in clk_round_rate()
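clk_round_rate() above runs the same determine/round machinery as a real rate change, minus the commit: it temporarily drops the caller's own protection, applies the consumer's min/max boundaries, then restores protection. A small consumer sketch (function name and policy invented; the API calls are real):

#include <linux/clk.h>
#include <linux/printk.h>

static int my_pick_pixel_clock(struct clk *clk, unsigned long wanted)
{
	long rounded;

	/* Ask what the framework would actually deliver, without committing. */
	rounded = clk_round_rate(clk, wanted);
	if (rounded <= 0)
		return rounded ? (int)rounded : -EINVAL;

	pr_debug("requested %lu Hz, achievable %ld Hz\n", wanted, rounded);

	return clk_set_rate(clk, rounded);
}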
1497 * @core: clk that is changing rate
1509 static int __clk_notify(struct clk_core *core, unsigned long msg, in __clk_notify() argument
1520 if (cn->clk->core == core) { in __clk_notify()
1534 * @core: first clk in the subtree
1541 static void __clk_recalc_accuracies(struct clk_core *core) in __clk_recalc_accuracies() argument
1548 if (core->parent) in __clk_recalc_accuracies()
1549 parent_accuracy = core->parent->accuracy; in __clk_recalc_accuracies()
1551 if (core->ops->recalc_accuracy) in __clk_recalc_accuracies()
1552 core->accuracy = core->ops->recalc_accuracy(core->hw, in __clk_recalc_accuracies()
1555 core->accuracy = parent_accuracy; in __clk_recalc_accuracies()
1557 hlist_for_each_entry(child, &core->children, child_node) in __clk_recalc_accuracies()
1561 static long clk_core_get_accuracy_recalc(struct clk_core *core) in clk_core_get_accuracy_recalc() argument
1563 if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE)) in clk_core_get_accuracy_recalc()
1564 __clk_recalc_accuracies(core); in clk_core_get_accuracy_recalc()
1566 return clk_core_get_accuracy_no_lock(core); in clk_core_get_accuracy_recalc()
1586 accuracy = clk_core_get_accuracy_recalc(clk->core); in clk_get_accuracy()
1593 static unsigned long clk_recalc(struct clk_core *core, in clk_recalc() argument
1598 if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) { in clk_recalc()
1599 rate = core->ops->recalc_rate(core->hw, parent_rate); in clk_recalc()
1600 clk_pm_runtime_put(core); in clk_recalc()
1607 * @core: first clk in the subtree
1617 static void __clk_recalc_rates(struct clk_core *core, unsigned long msg) in __clk_recalc_rates() argument
1625 old_rate = core->rate; in __clk_recalc_rates()
1627 if (core->parent) in __clk_recalc_rates()
1628 parent_rate = core->parent->rate; in __clk_recalc_rates()
1630 core->rate = clk_recalc(core, parent_rate); in __clk_recalc_rates()
1636 if (core->notifier_count && msg) in __clk_recalc_rates()
1637 __clk_notify(core, msg, old_rate, core->rate); in __clk_recalc_rates()
1639 hlist_for_each_entry(child, &core->children, child_node) in __clk_recalc_rates()
1643 static unsigned long clk_core_get_rate_recalc(struct clk_core *core) in clk_core_get_rate_recalc() argument
1645 if (core && (core->flags & CLK_GET_RATE_NOCACHE)) in clk_core_get_rate_recalc()
1646 __clk_recalc_rates(core, 0); in clk_core_get_rate_recalc()
1648 return clk_core_get_rate_nolock(core); in clk_core_get_rate_recalc()
1667 rate = clk_core_get_rate_recalc(clk->core); in clk_get_rate()
1674 static int clk_fetch_parent_index(struct clk_core *core, in clk_fetch_parent_index() argument
1682 for (i = 0; i < core->num_parents; i++) { in clk_fetch_parent_index()
1684 if (core->parents[i].core == parent) in clk_fetch_parent_index()
1688 if (core->parents[i].core) in clk_fetch_parent_index()
1691 /* Maybe core hasn't been cached but the hw is all we know? */ in clk_fetch_parent_index()
1692 if (core->parents[i].hw) { in clk_fetch_parent_index()
1693 if (core->parents[i].hw == parent->hw) in clk_fetch_parent_index()
1701 if (parent == clk_core_get(core, i)) in clk_fetch_parent_index()
1705 if (core->parents[i].name && in clk_fetch_parent_index()
1706 !strcmp(parent->name, core->parents[i].name)) in clk_fetch_parent_index()
1710 if (i == core->num_parents) in clk_fetch_parent_index()
1713 core->parents[i].core = parent; in clk_fetch_parent_index()
1731 return clk_fetch_parent_index(hw->core, parent->core); in clk_hw_get_parent_index()
1736 * Update the orphan status of @core and all its children.
1738 static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan) in clk_core_update_orphan_status() argument
1742 core->orphan = is_orphan; in clk_core_update_orphan_status()
1744 hlist_for_each_entry(child, &core->children, child_node) in clk_core_update_orphan_status()
1748 static void clk_reparent(struct clk_core *core, struct clk_core *new_parent) in clk_reparent() argument
1750 bool was_orphan = core->orphan; in clk_reparent()
1752 hlist_del(&core->child_node); in clk_reparent()
1758 if (new_parent->new_child == core) in clk_reparent()
1761 hlist_add_head(&core->child_node, &new_parent->children); in clk_reparent()
1764 clk_core_update_orphan_status(core, becomes_orphan); in clk_reparent()
1766 hlist_add_head(&core->child_node, &clk_orphan_list); in clk_reparent()
1768 clk_core_update_orphan_status(core, true); in clk_reparent()
1771 core->parent = new_parent; in clk_reparent()
1774 static struct clk_core *__clk_set_parent_before(struct clk_core *core, in __clk_set_parent_before() argument
1778 struct clk_core *old_parent = core->parent; in __clk_set_parent_before()
1801 if (core->flags & CLK_OPS_PARENT_ENABLE) { in __clk_set_parent_before()
1807 if (core->prepare_count) { in __clk_set_parent_before()
1809 clk_core_enable_lock(core); in __clk_set_parent_before()
1814 clk_reparent(core, parent); in __clk_set_parent_before()
1820 static void __clk_set_parent_after(struct clk_core *core, in __clk_set_parent_after() argument
1828 if (core->prepare_count) { in __clk_set_parent_after()
1829 clk_core_disable_lock(core); in __clk_set_parent_after()
1834 if (core->flags & CLK_OPS_PARENT_ENABLE) { in __clk_set_parent_after()
1840 static int __clk_set_parent(struct clk_core *core, struct clk_core *parent, in __clk_set_parent() argument
1847 old_parent = __clk_set_parent_before(core, parent); in __clk_set_parent()
1849 trace_clk_set_parent(core, parent); in __clk_set_parent()
1852 if (parent && core->ops->set_parent) in __clk_set_parent()
1853 ret = core->ops->set_parent(core->hw, p_index); in __clk_set_parent()
1855 trace_clk_set_parent_complete(core, parent); in __clk_set_parent()
1859 clk_reparent(core, old_parent); in __clk_set_parent()
1861 __clk_set_parent_after(core, old_parent, parent); in __clk_set_parent()
1866 __clk_set_parent_after(core, parent, old_parent); in __clk_set_parent()
1873 * @core: first clk in the subtree
1885 static int __clk_speculate_rates(struct clk_core *core, in __clk_speculate_rates() argument
1894 new_rate = clk_recalc(core, parent_rate); in __clk_speculate_rates()
1897 if (core->notifier_count) in __clk_speculate_rates()
1898 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate); in __clk_speculate_rates()
1902 __func__, core->name, ret); in __clk_speculate_rates()
1906 hlist_for_each_entry(child, &core->children, child_node) { in __clk_speculate_rates()
1916 static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate, in clk_calc_subtree() argument
1921 core->new_rate = new_rate; in clk_calc_subtree()
1922 core->new_parent = new_parent; in clk_calc_subtree()
1923 core->new_parent_index = p_index; in clk_calc_subtree()
1925 core->new_child = NULL; in clk_calc_subtree()
1926 if (new_parent && new_parent != core->parent) in clk_calc_subtree()
1927 new_parent->new_child = core; in clk_calc_subtree()
1929 hlist_for_each_entry(child, &core->children, child_node) { in clk_calc_subtree()
1939 static struct clk_core *clk_calc_new_rates(struct clk_core *core, in clk_calc_new_rates() argument
1942 struct clk_core *top = core; in clk_calc_new_rates()
1952 if (IS_ERR_OR_NULL(core)) in clk_calc_new_rates()
1956 parent = old_parent = core->parent; in clk_calc_new_rates()
1960 clk_core_get_boundaries(core, &min_rate, &max_rate); in clk_calc_new_rates()
1963 if (clk_core_can_round(core)) { in clk_calc_new_rates()
1970 clk_core_init_rate_req(core, &req); in clk_calc_new_rates()
1972 ret = clk_core_determine_round_nolock(core, &req); in clk_calc_new_rates()
1978 parent = req.best_parent_hw ? req.best_parent_hw->core : NULL; in clk_calc_new_rates()
1982 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) { in clk_calc_new_rates()
1984 core->new_rate = core->rate; in clk_calc_new_rates()
1995 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) { in clk_calc_new_rates()
1997 __func__, core->name); in clk_calc_new_rates()
2002 if (parent && core->num_parents > 1) { in clk_calc_new_rates()
2003 p_index = clk_fetch_parent_index(core, parent); in clk_calc_new_rates()
2006 __func__, parent->name, core->name); in clk_calc_new_rates()
2011 if ((core->flags & CLK_SET_RATE_PARENT) && parent && in clk_calc_new_rates()
2016 clk_calc_subtree(core, new_rate, parent, p_index); in clk_calc_new_rates()
2026 static struct clk_core *clk_propagate_rate_change(struct clk_core *core, in clk_propagate_rate_change() argument
2032 if (core->rate == core->new_rate) in clk_propagate_rate_change()
2035 if (core->notifier_count) { in clk_propagate_rate_change()
2036 ret = __clk_notify(core, event, core->rate, core->new_rate); in clk_propagate_rate_change()
2038 fail_clk = core; in clk_propagate_rate_change()
2041 hlist_for_each_entry(child, &core->children, child_node) { in clk_propagate_rate_change()
2043 if (child->new_parent && child->new_parent != core) in clk_propagate_rate_change()
2050 /* handle the new child who might not be in core->children yet */ in clk_propagate_rate_change()
2051 if (core->new_child) { in clk_propagate_rate_change()
2052 tmp_clk = clk_propagate_rate_change(core->new_child, event); in clk_propagate_rate_change()
2064 static void clk_change_rate(struct clk_core *core) in clk_change_rate() argument
2074 old_rate = core->rate; in clk_change_rate()
2076 if (core->new_parent) { in clk_change_rate()
2077 parent = core->new_parent; in clk_change_rate()
2078 best_parent_rate = core->new_parent->rate; in clk_change_rate()
2079 } else if (core->parent) { in clk_change_rate()
2080 parent = core->parent; in clk_change_rate()
2081 best_parent_rate = core->parent->rate; in clk_change_rate()
2084 if (clk_pm_runtime_get(core)) in clk_change_rate()
2087 if (core->flags & CLK_SET_RATE_UNGATE) { in clk_change_rate()
2090 clk_core_prepare(core); in clk_change_rate()
2092 clk_core_enable(core); in clk_change_rate()
2096 if (core->new_parent && core->new_parent != core->parent) { in clk_change_rate()
2097 old_parent = __clk_set_parent_before(core, core->new_parent); in clk_change_rate()
2098 trace_clk_set_parent(core, core->new_parent); in clk_change_rate()
2100 if (core->ops->set_rate_and_parent) { in clk_change_rate()
2102 core->ops->set_rate_and_parent(core->hw, core->new_rate, in clk_change_rate()
2104 core->new_parent_index); in clk_change_rate()
2105 } else if (core->ops->set_parent) { in clk_change_rate()
2106 core->ops->set_parent(core->hw, core->new_parent_index); in clk_change_rate()
2109 trace_clk_set_parent_complete(core, core->new_parent); in clk_change_rate()
2110 __clk_set_parent_after(core, core->new_parent, old_parent); in clk_change_rate()
2113 if (core->flags & CLK_OPS_PARENT_ENABLE) in clk_change_rate()
2116 trace_clk_set_rate(core, core->new_rate); in clk_change_rate()
2118 if (!skip_set_rate && core->ops->set_rate) in clk_change_rate()
2119 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate); in clk_change_rate()
2121 trace_clk_set_rate_complete(core, core->new_rate); in clk_change_rate()
2123 core->rate = clk_recalc(core, best_parent_rate); in clk_change_rate()
2125 if (core->flags & CLK_SET_RATE_UNGATE) { in clk_change_rate()
2129 clk_core_disable(core); in clk_change_rate()
2131 clk_core_unprepare(core); in clk_change_rate()
2134 if (core->flags & CLK_OPS_PARENT_ENABLE) in clk_change_rate()
2137 if (core->notifier_count && old_rate != core->rate) in clk_change_rate()
2138 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate); in clk_change_rate()
2140 if (core->flags & CLK_RECALC_NEW_RATES) in clk_change_rate()
2141 (void)clk_calc_new_rates(core, core->new_rate); in clk_change_rate()
2147 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) { in clk_change_rate()
2149 if (child->new_parent && child->new_parent != core) in clk_change_rate()
2154 /* handle the new child who might not be in core->children yet */ in clk_change_rate()
2155 if (core->new_child) in clk_change_rate()
2156 clk_change_rate(core->new_child); in clk_change_rate()
2158 clk_pm_runtime_put(core); in clk_change_rate()
2161 static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, in clk_core_req_round_rate_nolock() argument
2169 if (!core) in clk_core_req_round_rate_nolock()
2173 cnt = clk_core_rate_nuke_protect(core); in clk_core_req_round_rate_nolock()
2177 clk_core_get_boundaries(core, &req.min_rate, &req.max_rate); in clk_core_req_round_rate_nolock()
2180 ret = clk_core_round_rate_nolock(core, &req); in clk_core_req_round_rate_nolock()
2183 clk_core_rate_restore_protect(core, cnt); in clk_core_req_round_rate_nolock()
2188 static int clk_core_set_rate_nolock(struct clk_core *core, in clk_core_set_rate_nolock() argument
2195 if (!core) in clk_core_set_rate_nolock()
2198 rate = clk_core_req_round_rate_nolock(core, req_rate); in clk_core_set_rate_nolock()
2201 if (rate == clk_core_get_rate_nolock(core)) in clk_core_set_rate_nolock()
2205 if (clk_core_rate_is_protected(core)) in clk_core_set_rate_nolock()
2209 top = clk_calc_new_rates(core, req_rate); in clk_core_set_rate_nolock()
2213 ret = clk_pm_runtime_get(core); in clk_core_set_rate_nolock()
2230 core->req_rate = req_rate; in clk_core_set_rate_nolock()
2232 clk_pm_runtime_put(core); in clk_core_set_rate_nolock()
2269 clk_core_rate_unprotect(clk->core); in clk_set_rate()
2271 ret = clk_core_set_rate_nolock(clk->core, rate); in clk_set_rate()
2274 clk_core_rate_protect(clk->core); in clk_set_rate()
2317 ret = clk_core_set_rate_nolock(clk->core, rate); in clk_set_rate_exclusive()
2319 clk_core_rate_protect(clk->core); in clk_set_rate_exclusive()
2347 __func__, clk->core->name, clk->dev_id, clk->con_id, in clk_set_rate_range()
2355 clk_core_rate_unprotect(clk->core); in clk_set_rate_range()
2363 if (!clk_core_check_boundaries(clk->core, min, max)) { in clk_set_rate_range()
2368 rate = clk_core_get_rate_nolock(clk->core); in clk_set_rate_range()
2388 ret = clk_core_set_rate_nolock(clk->core, rate); in clk_set_rate_range()
2398 clk_core_rate_protect(clk->core); in clk_set_rate_range()
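clk_core_set_rate_nolock() above re-rounds the request, refuses protected clocks, then calculates and propagates the new rates before committing them, while clk_set_rate_range() aggregates per-consumer min/max boundaries and re-rates the clock if it now falls outside them. A consumer-side sketch of the two entry points (rates invented):

#include <linux/clk.h>

static int my_constrain_clock(struct clk *clk)
{
	int ret;

	/* Constrain this consumer's acceptable window. */
	ret = clk_set_rate_range(clk, 100000000, 200000000);
	if (ret)
		return ret;

	/* An explicit request is evaluated against the active boundaries. */
	return clk_set_rate(clk, 150000000);
}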
2453 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk; in clk_get_parent()
2460 static struct clk_core *__clk_init_parent(struct clk_core *core) in __clk_init_parent() argument
2464 if (core->num_parents > 1 && core->ops->get_parent) in __clk_init_parent()
2465 index = core->ops->get_parent(core->hw); in __clk_init_parent()
2467 return clk_core_get_parent_by_index(core, index); in __clk_init_parent()
2470 static void clk_core_reparent(struct clk_core *core, in clk_core_reparent() argument
2473 clk_reparent(core, new_parent); in clk_core_reparent()
2474 __clk_recalc_accuracies(core); in clk_core_reparent()
2475 __clk_recalc_rates(core, POST_RATE_CHANGE); in clk_core_reparent()
2483 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core); in clk_hw_reparent()
2498 struct clk_core *core, *parent_core; in clk_has_parent() local
2505 core = clk->core; in clk_has_parent()
2506 parent_core = parent->core; in clk_has_parent()
2509 if (core->parent == parent_core) in clk_has_parent()
2512 for (i = 0; i < core->num_parents; i++) in clk_has_parent()
2513 if (!strcmp(core->parents[i].name, parent_core->name)) in clk_has_parent()
2520 static int clk_core_set_parent_nolock(struct clk_core *core, in clk_core_set_parent_nolock() argument
2529 if (!core) in clk_core_set_parent_nolock()
2532 if (core->parent == parent) in clk_core_set_parent_nolock()
2536 if (core->num_parents > 1 && !core->ops->set_parent) in clk_core_set_parent_nolock()
2540 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) in clk_core_set_parent_nolock()
2543 if (clk_core_rate_is_protected(core)) in clk_core_set_parent_nolock()
2548 p_index = clk_fetch_parent_index(core, parent); in clk_core_set_parent_nolock()
2551 __func__, parent->name, core->name); in clk_core_set_parent_nolock()
2557 ret = clk_pm_runtime_get(core); in clk_core_set_parent_nolock()
2562 ret = __clk_speculate_rates(core, p_rate); in clk_core_set_parent_nolock()
2569 ret = __clk_set_parent(core, parent, p_index); in clk_core_set_parent_nolock()
2573 __clk_recalc_rates(core, ABORT_RATE_CHANGE); in clk_core_set_parent_nolock()
2575 __clk_recalc_rates(core, POST_RATE_CHANGE); in clk_core_set_parent_nolock()
2576 __clk_recalc_accuracies(core); in clk_core_set_parent_nolock()
2580 clk_pm_runtime_put(core); in clk_core_set_parent_nolock()
2587 return clk_core_set_parent_nolock(hw->core, parent->core); in clk_hw_set_parent()
2618 clk_core_rate_unprotect(clk->core); in clk_set_parent()
2620 ret = clk_core_set_parent_nolock(clk->core, in clk_set_parent()
2621 parent ? parent->core : NULL); in clk_set_parent()
2624 clk_core_rate_protect(clk->core); in clk_set_parent()
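clk_set_parent() above funnels into clk_core_set_parent_nolock(), which rejects muxes without a .set_parent op, prepared clocks carrying CLK_SET_PARENT_GATE, and protected clocks, then speculates the new rates before switching. A consumer-side sketch (names invented; clk_has_parent() and clk_set_parent() are the real API):

#include <linux/clk.h>

static int my_switch_mux(struct clk *mux, struct clk *new_parent)
{
	/* Cheap sanity check against the mux's registered parent list. */
	if (!clk_has_parent(mux, new_parent))
		return -EINVAL;

	return clk_set_parent(mux, new_parent);
}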
2632 static int clk_core_set_phase_nolock(struct clk_core *core, int degrees) in clk_core_set_phase_nolock() argument
2638 if (!core) in clk_core_set_phase_nolock()
2641 if (clk_core_rate_is_protected(core)) in clk_core_set_phase_nolock()
2644 trace_clk_set_phase(core, degrees); in clk_core_set_phase_nolock()
2646 if (core->ops->set_phase) { in clk_core_set_phase_nolock()
2647 ret = core->ops->set_phase(core->hw, degrees); in clk_core_set_phase_nolock()
2649 core->phase = degrees; in clk_core_set_phase_nolock()
2652 trace_clk_set_phase_complete(core, degrees); in clk_core_set_phase_nolock()
2692 clk_core_rate_unprotect(clk->core); in clk_set_phase()
2694 ret = clk_core_set_phase_nolock(clk->core, degrees); in clk_set_phase()
2697 clk_core_rate_protect(clk->core); in clk_set_phase()
2705 static int clk_core_get_phase(struct clk_core *core) in clk_core_get_phase() argument
2710 if (!core->ops->get_phase) in clk_core_get_phase()
2714 ret = core->ops->get_phase(core->hw); in clk_core_get_phase()
2716 core->phase = ret; in clk_core_get_phase()
2736 ret = clk_core_get_phase(clk->core); in clk_get_phase()
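The phase path above caches the programmed value in core->phase on success and reads it back through .get_phase. A short consumer sketch (90 degrees is just an example value):

#include <linux/clk.h>

static int my_set_sample_phase(struct clk *clk)
{
	int ret = clk_set_phase(clk, 90);

	if (ret)
		return ret;

	return clk_get_phase(clk);	/* negative errno or the cached phase */
}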
2743 static void clk_core_reset_duty_cycle_nolock(struct clk_core *core) in clk_core_reset_duty_cycle_nolock() argument
2746 core->duty.num = 1; in clk_core_reset_duty_cycle_nolock()
2747 core->duty.den = 2; in clk_core_reset_duty_cycle_nolock()
2750 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
2752 static int clk_core_update_duty_cycle_nolock(struct clk_core *core) in clk_core_update_duty_cycle_nolock() argument
2754 struct clk_duty *duty = &core->duty; in clk_core_update_duty_cycle_nolock()
2757 if (!core->ops->get_duty_cycle) in clk_core_update_duty_cycle_nolock()
2758 return clk_core_update_duty_cycle_parent_nolock(core); in clk_core_update_duty_cycle_nolock()
2760 ret = core->ops->get_duty_cycle(core->hw, duty); in clk_core_update_duty_cycle_nolock()
2773 clk_core_reset_duty_cycle_nolock(core); in clk_core_update_duty_cycle_nolock()
2777 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core) in clk_core_update_duty_cycle_parent_nolock() argument
2781 if (core->parent && in clk_core_update_duty_cycle_parent_nolock()
2782 core->flags & CLK_DUTY_CYCLE_PARENT) { in clk_core_update_duty_cycle_parent_nolock()
2783 ret = clk_core_update_duty_cycle_nolock(core->parent); in clk_core_update_duty_cycle_parent_nolock()
2784 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); in clk_core_update_duty_cycle_parent_nolock()
2786 clk_core_reset_duty_cycle_nolock(core); in clk_core_update_duty_cycle_parent_nolock()
2792 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2795 static int clk_core_set_duty_cycle_nolock(struct clk_core *core, in clk_core_set_duty_cycle_nolock() argument
2802 if (clk_core_rate_is_protected(core)) in clk_core_set_duty_cycle_nolock()
2805 trace_clk_set_duty_cycle(core, duty); in clk_core_set_duty_cycle_nolock()
2807 if (!core->ops->set_duty_cycle) in clk_core_set_duty_cycle_nolock()
2808 return clk_core_set_duty_cycle_parent_nolock(core, duty); in clk_core_set_duty_cycle_nolock()
2810 ret = core->ops->set_duty_cycle(core->hw, duty); in clk_core_set_duty_cycle_nolock()
2812 memcpy(&core->duty, duty, sizeof(*duty)); in clk_core_set_duty_cycle_nolock()
2814 trace_clk_set_duty_cycle_complete(core, duty); in clk_core_set_duty_cycle_nolock()
2819 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, in clk_core_set_duty_cycle_parent_nolock() argument
2824 if (core->parent && in clk_core_set_duty_cycle_parent_nolock()
2825 core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) { in clk_core_set_duty_cycle_parent_nolock()
2826 ret = clk_core_set_duty_cycle_nolock(core->parent, duty); in clk_core_set_duty_cycle_parent_nolock()
2827 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); in clk_core_set_duty_cycle_parent_nolock()
2862 clk_core_rate_unprotect(clk->core); in clk_set_duty_cycle()
2864 ret = clk_core_set_duty_cycle_nolock(clk->core, &duty); in clk_set_duty_cycle()
2867 clk_core_rate_protect(clk->core); in clk_set_duty_cycle()
2875 static int clk_core_get_scaled_duty_cycle(struct clk_core *core, in clk_core_get_scaled_duty_cycle() argument
2878 struct clk_duty *duty = &core->duty; in clk_core_get_scaled_duty_cycle()
2883 ret = clk_core_update_duty_cycle_nolock(core); in clk_core_get_scaled_duty_cycle()
2905 return clk_core_get_scaled_duty_cycle(clk->core, scale); in clk_get_scaled_duty_cycle()
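Duty cycle follows the same shape: .set_duty_cycle/.get_duty_cycle when the op exists, otherwise deferral to the parent when CLK_DUTY_CYCLE_PARENT (or, for the setter, CLK_SET_RATE_PARENT) is set. A consumer sketch (the ratio is invented):

#include <linux/clk.h>

static int my_set_duty(struct clk *clk)
{
	int ret = clk_set_duty_cycle(clk, 1, 3);	/* request a 1/3 duty cycle */

	if (ret)
		return ret;

	return clk_get_scaled_duty_cycle(clk, 100);	/* read back as a percentage */
}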
2926 /* true if clk->core pointers match. Avoid dereferencing garbage */ in clk_is_match()
2928 if (p->core == q->core) in clk_is_match()
3074 struct clk_core *core = data; in clk_rate_set() local
3078 ret = clk_core_set_rate_nolock(core, val); in clk_rate_set()
3088 struct clk_core *core = data; in clk_prepare_enable_set() local
3092 ret = clk_prepare_enable(core->hw->clk); in clk_prepare_enable_set()
3094 clk_disable_unprepare(core->hw->clk); in clk_prepare_enable_set()
3101 struct clk_core *core = data; in clk_prepare_enable_get() local
3103 *val = core->enable_count && core->prepare_count; in clk_prepare_enable_get()
3117 struct clk_core *core = data; in clk_rate_get() local
3119 *val = core->rate; in clk_rate_get()
3147 struct clk_core *core = s->private; in clk_flags_show() local
3148 unsigned long flags = core->flags; in clk_flags_show()
3166 static void possible_parent_show(struct seq_file *s, struct clk_core *core, in possible_parent_show() argument
3183 parent = clk_core_get_parent_by_index(core, i); in possible_parent_show()
3186 else if (core->parents[i].name) in possible_parent_show()
3187 seq_puts(s, core->parents[i].name); in possible_parent_show()
3188 else if (core->parents[i].fw_name) in possible_parent_show()
3189 seq_printf(s, "<%s>(fw)", core->parents[i].fw_name); in possible_parent_show()
3190 else if (core->parents[i].index >= 0) in possible_parent_show()
3192 of_clk_get_parent_name(core->of_node, in possible_parent_show()
3193 core->parents[i].index)); in possible_parent_show()
3202 struct clk_core *core = s->private; in possible_parents_show() local
3205 for (i = 0; i < core->num_parents - 1; i++) in possible_parents_show()
3206 possible_parent_show(s, core, i, ' '); in possible_parents_show()
3208 possible_parent_show(s, core, i, '\n'); in possible_parents_show()
3216 struct clk_core *core = s->private; in current_parent_show() local
3218 if (core->parent) in current_parent_show()
3219 seq_printf(s, "%s\n", core->parent->name); in current_parent_show()
3227 struct clk_core *core = s->private; in clk_duty_cycle_show() local
3228 struct clk_duty *duty = &core->duty; in clk_duty_cycle_show()
3238 struct clk_core *core = s->private; in clk_min_rate_show() local
3242 clk_core_get_boundaries(core, &min_rate, &max_rate); in clk_min_rate_show()
3252 struct clk_core *core = s->private; in clk_max_rate_show() local
3256 clk_core_get_boundaries(core, &min_rate, &max_rate); in clk_max_rate_show()
3264 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) in clk_debug_create_one() argument
3268 if (!core || !pdentry) in clk_debug_create_one()
3271 root = debugfs_create_dir(core->name, pdentry); in clk_debug_create_one()
3272 core->dentry = root; in clk_debug_create_one()
3274 debugfs_create_file("clk_rate", clk_rate_mode, root, core, in clk_debug_create_one()
3276 debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops); in clk_debug_create_one()
3277 debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops); in clk_debug_create_one()
3278 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy); in clk_debug_create_one()
3279 debugfs_create_u32("clk_phase", 0444, root, &core->phase); in clk_debug_create_one()
3280 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops); in clk_debug_create_one()
3281 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count); in clk_debug_create_one()
3282 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count); in clk_debug_create_one()
3283 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count); in clk_debug_create_one()
3284 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count); in clk_debug_create_one()
3285 debugfs_create_file("clk_duty_cycle", 0444, root, core, in clk_debug_create_one()
3288 debugfs_create_file("clk_prepare_enable", 0644, root, core, in clk_debug_create_one()
3292 if (core->num_parents > 0) in clk_debug_create_one()
3293 debugfs_create_file("clk_parent", 0444, root, core, in clk_debug_create_one()
3296 if (core->num_parents > 1) in clk_debug_create_one()
3297 debugfs_create_file("clk_possible_parents", 0444, root, core, in clk_debug_create_one()
3300 if (core->ops->debug_init) in clk_debug_create_one()
3301 core->ops->debug_init(core->hw, core->dentry); in clk_debug_create_one()
3306 * @core: the clk being added to the debugfs clk directory
3312 static void clk_debug_register(struct clk_core *core) in clk_debug_register() argument
3315 hlist_add_head(&core->debug_node, &clk_debug_list); in clk_debug_register()
3317 clk_debug_create_one(core, rootdir); in clk_debug_register()
3323 * @core: the clk being removed from the debugfs clk directory
3329 static void clk_debug_unregister(struct clk_core *core) in clk_debug_unregister() argument
3332 hlist_del_init(&core->debug_node); in clk_debug_unregister()
3333 debugfs_remove_recursive(core->dentry); in clk_debug_unregister()
3334 core->dentry = NULL; in clk_debug_unregister()
3349 struct clk_core *core; in clk_debug_init() local
3381 hlist_for_each_entry(core, &clk_debug_list, debug_node) in clk_debug_init()
3382 clk_debug_create_one(core, rootdir); in clk_debug_init()
3391 static inline void clk_debug_register(struct clk_core *core) { } in clk_debug_register() argument
3392 static inline void clk_debug_unregister(struct clk_core *core) in clk_debug_unregister() argument
3440 * @core: clk_core being initialized
3445 static int __clk_core_init(struct clk_core *core) in __clk_core_init() argument
3452 if (!core) in __clk_core_init()
3458 * Set hw->core after grabbing the prepare_lock to synchronize with in __clk_core_init()
3459 * callers of clk_core_fill_parent_index() where we treat hw->core in __clk_core_init()
3463 core->hw->core = core; in __clk_core_init()
3465 ret = clk_pm_runtime_get(core); in __clk_core_init()
3470 if (clk_core_lookup(core->name)) { in __clk_core_init()
3472 __func__, core->name); in __clk_core_init()
3478 if (core->ops->set_rate && in __clk_core_init()
3479 !((core->ops->round_rate || core->ops->determine_rate) && in __clk_core_init()
3480 core->ops->recalc_rate)) { in __clk_core_init()
3482 __func__, core->name); in __clk_core_init()
3487 if (core->ops->set_parent && !core->ops->get_parent) { in __clk_core_init()
3489 __func__, core->name); in __clk_core_init()
3494 if (core->num_parents > 1 && !core->ops->get_parent) { in __clk_core_init()
3496 __func__, core->name); in __clk_core_init()
3501 if (core->ops->set_rate_and_parent && in __clk_core_init()
3502 !(core->ops->set_parent && core->ops->set_rate)) { in __clk_core_init()
3504 __func__, core->name); in __clk_core_init()
3523 if (core->ops->init) { in __clk_core_init()
3524 ret = core->ops->init(core->hw); in __clk_core_init()
3529 parent = core->parent = __clk_init_parent(core); in __clk_core_init()
3532 * Populate core->parent if parent has already been clk_core_init'd. If in __clk_core_init()
3542 hlist_add_head(&core->child_node, &parent->children); in __clk_core_init()
3543 core->orphan = parent->orphan; in __clk_core_init()
3544 } else if (!core->num_parents) { in __clk_core_init()
3545 hlist_add_head(&core->child_node, &clk_root_list); in __clk_core_init()
3546 core->orphan = false; in __clk_core_init()
3548 hlist_add_head(&core->child_node, &clk_orphan_list); in __clk_core_init()
3549 core->orphan = true; in __clk_core_init()
3559 if (core->ops->recalc_accuracy) in __clk_core_init()
3560 core->accuracy = core->ops->recalc_accuracy(core->hw, in __clk_core_init()
3563 core->accuracy = parent->accuracy; in __clk_core_init()
3565 core->accuracy = 0; in __clk_core_init()
3572 phase = clk_core_get_phase(core); in __clk_core_init()
3576 core->name); in __clk_core_init()
3583 clk_core_update_duty_cycle_nolock(core); in __clk_core_init()
3591 if (core->ops->recalc_rate) in __clk_core_init()
3592 rate = core->ops->recalc_rate(core->hw, in __clk_core_init()
3598 core->rate = core->req_rate = rate; in __clk_core_init()
3605 if (core->flags & CLK_IS_CRITICAL) { in __clk_core_init()
3608 ret = clk_core_prepare(core); in __clk_core_init()
3611 __func__, core->name); in __clk_core_init()
3616 ret = clk_core_enable(core); in __clk_core_init()
3620 __func__, core->name); in __clk_core_init()
3621 clk_core_unprepare(core); in __clk_core_init()
3629 kref_init(&core->ref); in __clk_core_init()
3631 clk_pm_runtime_put(core); in __clk_core_init()
3634 hlist_del_init(&core->child_node); in __clk_core_init()
3635 core->hw->core = NULL; in __clk_core_init()
3641 clk_debug_register(core); in __clk_core_init()
3648 * @core: clk to add consumer to
3651 static void clk_core_link_consumer(struct clk_core *core, struct clk *clk) in clk_core_link_consumer() argument
3654 hlist_add_head(&clk->clks_node, &core->clks); in clk_core_link_consumer()
3670 * @core: clk to allocate a consumer for
3676 static struct clk *alloc_clk(struct clk_core *core, const char *dev_id, in alloc_clk() argument
3685 clk->core = core; in alloc_clk()
3722 struct clk_core *core; in clk_hw_create_clk() local
3728 core = hw->core; in clk_hw_create_clk()
3729 clk = alloc_clk(core, dev_id, con_id); in clk_hw_create_clk()
3734 if (!try_module_get(core->owner)) { in clk_hw_create_clk()
3739 kref_get(&core->ref); in clk_hw_create_clk()
3740 clk_core_link_consumer(core, clk); in clk_hw_create_clk()
3762 static int clk_core_populate_parent_map(struct clk_core *core, in clk_core_populate_parent_map() argument
3780 core->parents = parents; in clk_core_populate_parent_map()
3791 __func__, core->name); in clk_core_populate_parent_map()
3824 static void clk_core_free_parent_map(struct clk_core *core) in clk_core_free_parent_map() argument
3826 int i = core->num_parents; in clk_core_free_parent_map()
3828 if (!core->num_parents) in clk_core_free_parent_map()
3832 kfree_const(core->parents[i].name); in clk_core_free_parent_map()
3833 kfree_const(core->parents[i].fw_name); in clk_core_free_parent_map()
3836 kfree(core->parents); in clk_core_free_parent_map()
3843 struct clk_core *core; in __clk_register() local
3849 * we catch use of hw->init early on in the core. in __clk_register()
3853 core = kzalloc(sizeof(*core), GFP_KERNEL); in __clk_register()
3854 if (!core) { in __clk_register()
3859 core->name = kstrdup_const(init->name, GFP_KERNEL); in __clk_register()
3860 if (!core->name) { in __clk_register()
3869 core->ops = init->ops; in __clk_register()
3872 core->rpm_enabled = true; in __clk_register()
3873 core->dev = dev; in __clk_register()
3874 core->of_node = np; in __clk_register()
3876 core->owner = dev->driver->owner; in __clk_register()
3877 core->hw = hw; in __clk_register()
3878 core->flags = init->flags; in __clk_register()
3879 core->num_parents = init->num_parents; in __clk_register()
3880 core->min_rate = 0; in __clk_register()
3881 core->max_rate = ULONG_MAX; in __clk_register()
3883 ret = clk_core_populate_parent_map(core, init); in __clk_register()
3887 INIT_HLIST_HEAD(&core->clks); in __clk_register()
3893 hw->clk = alloc_clk(core, NULL, NULL); in __clk_register()
3899 clk_core_link_consumer(core, hw->clk); in __clk_register()
3901 ret = __clk_core_init(core); in __clk_register()
3913 clk_core_free_parent_map(core); in __clk_register()
3916 kfree_const(core->name); in __clk_register()
3918 kfree(core); in __clk_register()
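__clk_register() above allocates the clk_core, copies the clk_init_data fields (name, ops, flags, parent map), wires up the consumer list and hands the result to __clk_core_init(); providers normally reach it through clk_hw_register() or devm_clk_hw_register(). A stripped-down provider sketch (the gate driver, its names and the omitted register pokes are invented; the structures and the registration call are the real API):

#include <linux/clk-provider.h>
#include <linux/platform_device.h>

struct my_gate {
	struct clk_hw hw;
	void __iomem *reg;	/* placeholder, no real layout implied */
};

static int my_gate_enable(struct clk_hw *hw)
{
	/* hardware enable elided */
	return 0;
}

static void my_gate_disable(struct clk_hw *hw)
{
	/* hardware disable elided */
}

static const struct clk_ops my_gate_ops = {
	.enable = my_gate_enable,
	.disable = my_gate_disable,
};

static int my_gate_register(struct platform_device *pdev, struct my_gate *gate)
{
	static const char * const my_parents[] = { "osc" };
	struct clk_init_data init = {
		.name = "my_gate",
		.ops = &my_gate_ops,
		.parent_names = my_parents,
		.num_parents = 1,
	};

	gate->hw.init = &init;	/* consumed during registration */

	/* the devm_ variant also unregisters on driver removal */
	return devm_clk_hw_register(&pdev->dev, &gate->hw);
}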
4001 struct clk_core *core = container_of(ref, struct clk_core, ref); in __clk_release() local
4005 clk_core_free_parent_map(core); in __clk_release()
4006 kfree_const(core->name); in __clk_release()
4007 kfree(core); in __clk_release()
4052 if (root->parents[i].core == target) in clk_core_evict_parent_cache_subtree()
4053 root->parents[i].core = NULL; in clk_core_evict_parent_cache_subtree()
4060 static void clk_core_evict_parent_cache(struct clk_core *core) in clk_core_evict_parent_cache() argument
4069 clk_core_evict_parent_cache_subtree(root, core); in clk_core_evict_parent_cache()
4085 clk_debug_unregister(clk->core); in clk_unregister()
4089 ops = clk->core->ops; in clk_unregister()
4092 clk->core->name); in clk_unregister()
4100 clk->core->ops = &clk_nodrv_ops; in clk_unregister()
4104 ops->terminate(clk->core->hw); in clk_unregister()
4106 if (!hlist_empty(&clk->core->children)) { in clk_unregister()
4111 hlist_for_each_entry_safe(child, t, &clk->core->children, in clk_unregister()
4116 clk_core_evict_parent_cache(clk->core); in clk_unregister()
4118 hlist_del_init(&clk->core->child_node); in clk_unregister()
4120 if (clk->core->prepare_count) in clk_unregister()
4122 __func__, clk->core->name); in clk_unregister()
4124 if (clk->core->protect_count) in clk_unregister()
4126 __func__, clk->core->name); in clk_unregister()
4128 kref_put(&clk->core->ref, __clk_release); in clk_unregister()
4284 clk->core->protect_count -= (clk->exclusive_count - 1); in __clk_put()
4285 clk_core_rate_unprotect(clk->core); in __clk_put()
4290 if (clk->min_rate > clk->core->req_rate || in __clk_put()
4291 clk->max_rate < clk->core->req_rate) in __clk_put()
4292 clk_core_set_rate_nolock(clk->core, clk->core->req_rate); in __clk_put()
4294 owner = clk->core->owner; in __clk_put()
4295 kref_put(&clk->core->ref, __clk_release); in __clk_put()
4354 clk->core->notifier_count++; in clk_notifier_register()
4388 clk->core->notifier_count--; in clk_notifier_unregister()
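The final two hits are the notifier bookkeeping: notifier_count is what gates the __clk_notify() calls seen earlier in the rate-change paths. A consumer-side sketch of subscribing to rate-change events (the veto threshold is invented; the notifier API and event codes are real):

#include <linux/clk.h>
#include <linux/notifier.h>

static int my_clk_rate_cb(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/* Veto rates this consumer cannot follow. */
		if (ndata->new_rate > 400000000)
			return NOTIFY_STOP;
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
	default:
		return NOTIFY_OK;
	}
}

static struct notifier_block my_clk_nb = {
	.notifier_call = my_clk_rate_cb,
};

/* Pairs with clk_notifier_unregister(clk, &my_clk_nb) on teardown. */
static int my_watch_clock(struct clk *clk)
{
	return clk_notifier_register(clk, &my_clk_nb);
}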