
Lines matching refs: genpd

30 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\  argument
35 __routine = genpd->dev_ops.callback; \
46 void (*lock)(struct generic_pm_domain *genpd);
47 void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
48 int (*lock_interruptible)(struct generic_pm_domain *genpd);
49 void (*unlock)(struct generic_pm_domain *genpd);
52 static void genpd_lock_mtx(struct generic_pm_domain *genpd) in genpd_lock_mtx() argument
54 mutex_lock(&genpd->mlock); in genpd_lock_mtx()
57 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd, in genpd_lock_nested_mtx() argument
60 mutex_lock_nested(&genpd->mlock, depth); in genpd_lock_nested_mtx()
63 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd) in genpd_lock_interruptible_mtx() argument
65 return mutex_lock_interruptible(&genpd->mlock); in genpd_lock_interruptible_mtx()
68 static void genpd_unlock_mtx(struct generic_pm_domain *genpd) in genpd_unlock_mtx() argument
70 return mutex_unlock(&genpd->mlock); in genpd_unlock_mtx()
80 static void genpd_lock_spin(struct generic_pm_domain *genpd) in genpd_lock_spin() argument
81 __acquires(&genpd->slock) in genpd_lock_spin()
85 spin_lock_irqsave(&genpd->slock, flags); in genpd_lock_spin()
86 genpd->lock_flags = flags; in genpd_lock_spin()
89 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd, in genpd_lock_nested_spin() argument
91 __acquires(&genpd->slock) in genpd_lock_nested_spin()
95 spin_lock_irqsave_nested(&genpd->slock, flags, depth); in genpd_lock_nested_spin()
96 genpd->lock_flags = flags; in genpd_lock_nested_spin()
99 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd) in genpd_lock_interruptible_spin() argument
100 __acquires(&genpd->slock) in genpd_lock_interruptible_spin()
104 spin_lock_irqsave(&genpd->slock, flags); in genpd_lock_interruptible_spin()
105 genpd->lock_flags = flags; in genpd_lock_interruptible_spin()
109 static void genpd_unlock_spin(struct generic_pm_domain *genpd) in genpd_unlock_spin() argument
110 __releases(&genpd->slock) in genpd_unlock_spin()
112 spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags); in genpd_unlock_spin()
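The mutex- and spinlock-based helpers above are installed per domain through a lock_ops table (genpd_lock_init(), further down, picks genpd_spin_ops for GENPD_FLAG_IRQ_SAFE domains and genpd_mtx_ops otherwise). A minimal sketch of how that dispatch fits together, assuming the ops tables and the genpd_lock*() macros follow the pattern visible in these matches:

/* Sketch only: ops tables wiring up the helpers above, plus the dispatch
 * macros used by the rest of the file. Exact definitions in
 * drivers/base/power/domain.c may differ in detail. */
static const struct genpd_lock_ops genpd_mtx_ops = {
        .lock = genpd_lock_mtx,
        .lock_nested = genpd_lock_nested_mtx,
        .lock_interruptible = genpd_lock_interruptible_mtx,
        .unlock = genpd_unlock_mtx,
};

static const struct genpd_lock_ops genpd_spin_ops = {
        .lock = genpd_lock_spin,
        .lock_nested = genpd_lock_nested_spin,
        .lock_interruptible = genpd_lock_interruptible_spin,
        .unlock = genpd_unlock_spin,
};

/* Callers never choose a lock type directly; they always go through the
 * table installed by genpd_lock_init(). */
#define genpd_lock(p)                   p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)         p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)     p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)                 p->lock_ops->unlock(p)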
127 #define genpd_status_on(genpd) (genpd->status == GENPD_STATE_ON) argument
128 #define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE) argument
129 #define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON) argument
130 #define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP) argument
131 #define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN) argument
132 #define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON) argument
135 const struct generic_pm_domain *genpd) in irq_safe_dev_in_no_sleep_domain() argument
139 ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd); in irq_safe_dev_in_no_sleep_domain()
146 if (ret && !genpd_is_always_on(genpd)) in irq_safe_dev_in_no_sleep_domain()
148 genpd->name); in irq_safe_dev_in_no_sleep_domain()
185 static int genpd_stop_dev(const struct generic_pm_domain *genpd, in genpd_stop_dev() argument
188 return GENPD_DEV_CALLBACK(genpd, int, stop, dev); in genpd_stop_dev()
191 static int genpd_start_dev(const struct generic_pm_domain *genpd, in genpd_start_dev() argument
194 return GENPD_DEV_CALLBACK(genpd, int, start, dev); in genpd_start_dev()
197 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) in genpd_sd_counter_dec() argument
201 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0)) in genpd_sd_counter_dec()
202 ret = !!atomic_dec_and_test(&genpd->sd_count); in genpd_sd_counter_dec()
207 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) in genpd_sd_counter_inc() argument
209 atomic_inc(&genpd->sd_count); in genpd_sd_counter_inc()
216 static void genpd_debug_add(struct generic_pm_domain *genpd);
218 static void genpd_debug_remove(struct generic_pm_domain *genpd) in genpd_debug_remove() argument
223 debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir); in genpd_debug_remove()
226 static void genpd_update_accounting(struct generic_pm_domain *genpd) in genpd_update_accounting() argument
231 delta = ktime_sub(now, genpd->accounting_time); in genpd_update_accounting()
238 if (genpd->status == GENPD_STATE_ON) { in genpd_update_accounting()
239 int state_idx = genpd->state_idx; in genpd_update_accounting()
241 genpd->states[state_idx].idle_time = in genpd_update_accounting()
242 ktime_add(genpd->states[state_idx].idle_time, delta); in genpd_update_accounting()
244 genpd->on_time = ktime_add(genpd->on_time, delta); in genpd_update_accounting()
247 genpd->accounting_time = now; in genpd_update_accounting()
250 static inline void genpd_debug_add(struct generic_pm_domain *genpd) {} in genpd_debug_add() argument
251 static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {} in genpd_debug_remove() argument
252 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} in genpd_update_accounting() argument
255 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd, in _genpd_reeval_performance_state() argument
263 if (state == genpd->performance_state) in _genpd_reeval_performance_state()
267 if (state > genpd->performance_state) in _genpd_reeval_performance_state()
271 list_for_each_entry(pdd, &genpd->dev_list, list_node) { in _genpd_reeval_performance_state()
292 list_for_each_entry(link, &genpd->parent_links, parent_node) { in _genpd_reeval_performance_state()
300 static int genpd_xlate_performance_state(struct generic_pm_domain *genpd, in genpd_xlate_performance_state() argument
307 return dev_pm_opp_xlate_performance_state(genpd->opp_table, in genpd_xlate_performance_state()
312 static int _genpd_set_performance_state(struct generic_pm_domain *genpd, in _genpd_set_performance_state() argument
319 if (state == genpd->performance_state) in _genpd_set_performance_state()
323 list_for_each_entry(link, &genpd->child_links, child_node) { in _genpd_set_performance_state()
327 ret = genpd_xlate_performance_state(genpd, parent, state); in _genpd_set_performance_state()
349 if (genpd->set_performance_state) { in _genpd_set_performance_state()
350 ret = genpd->set_performance_state(genpd, state); in _genpd_set_performance_state()
355 genpd->performance_state = state; in _genpd_set_performance_state()
360 list_for_each_entry_continue_reverse(link, &genpd->child_links, in _genpd_set_performance_state()
384 struct generic_pm_domain *genpd = dev_to_genpd(dev); in genpd_set_performance_state() local
394 state = _genpd_reeval_performance_state(genpd, state); in genpd_set_performance_state()
396 ret = _genpd_set_performance_state(genpd, state, 0); in genpd_set_performance_state()
437 struct generic_pm_domain *genpd; in dev_pm_genpd_set_performance_state() local
440 genpd = dev_to_genpd_safe(dev); in dev_pm_genpd_set_performance_state()
441 if (!genpd) in dev_pm_genpd_set_performance_state()
448 genpd_lock(genpd); in dev_pm_genpd_set_performance_state()
456 genpd_unlock(genpd); in dev_pm_genpd_set_performance_state()
480 struct generic_pm_domain *genpd; in dev_pm_genpd_set_next_wakeup() local
482 genpd = dev_to_genpd_safe(dev); in dev_pm_genpd_set_next_wakeup()
483 if (!genpd) in dev_pm_genpd_set_next_wakeup()
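dev_pm_genpd_set_performance_state() and dev_pm_genpd_set_next_wakeup() are the consumer-facing entry points for the performance-state and wakeup bookkeeping above. A minimal sketch of how a driver attached to a genpd might call them; the device, the state value and the 10 ms horizon are purely illustrative:

#include <linux/ktime.h>
#include <linux/pm_domain.h>

/* Sketch: raise the domain performance requirement for this device and
 * hint at when it expects to run next. */
static int my_dev_prepare_burst(struct device *dev)
{
        int ret;

        /* Request a performance state; passing 0 later drops the vote. */
        ret = dev_pm_genpd_set_performance_state(dev, 4);
        if (ret)
                return ret;

        /* Let the domain governor know about the next expected wakeup. */
        dev_pm_genpd_set_next_wakeup(dev, ktime_add_ms(ktime_get(), 10));

        return 0;
}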
491 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) in _genpd_power_on() argument
493 unsigned int state_idx = genpd->state_idx; in _genpd_power_on()
499 ret = raw_notifier_call_chain_robust(&genpd->power_notifiers, in _genpd_power_on()
506 if (!genpd->power_on) in _genpd_power_on()
510 ret = genpd->power_on(genpd); in _genpd_power_on()
518 ret = genpd->power_on(genpd); in _genpd_power_on()
523 if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns) in _genpd_power_on()
526 genpd->states[state_idx].power_on_latency_ns = elapsed_ns; in _genpd_power_on()
527 genpd->max_off_time_changed = true; in _genpd_power_on()
529 genpd->name, "on", elapsed_ns); in _genpd_power_on()
532 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL); in _genpd_power_on()
535 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF, in _genpd_power_on()
540 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed) in _genpd_power_off() argument
542 unsigned int state_idx = genpd->state_idx; in _genpd_power_off()
548 ret = raw_notifier_call_chain_robust(&genpd->power_notifiers, in _genpd_power_off()
555 if (!genpd->power_off) in _genpd_power_off()
559 ret = genpd->power_off(genpd); in _genpd_power_off()
567 ret = genpd->power_off(genpd); in _genpd_power_off()
572 if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns) in _genpd_power_off()
575 genpd->states[state_idx].power_off_latency_ns = elapsed_ns; in _genpd_power_off()
576 genpd->max_off_time_changed = true; in _genpd_power_off()
578 genpd->name, "off", elapsed_ns); in _genpd_power_off()
581 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF, in _genpd_power_off()
585 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL); in _genpd_power_off()
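_genpd_power_on() and _genpd_power_off() call the provider's ->power_on/->power_off hooks, measure their latency and fire the GENPD_NOTIFY_* chain around them. A sketch of what a platform provider might plug in; struct my_pd, the register layout and the writel() targets are hypothetical:

#include <linux/io.h>
#include <linux/pm_domain.h>

/* Sketch: provider-side callbacks invoked by _genpd_power_on/_off(). */
struct my_pd {
        struct generic_pm_domain genpd;
        void __iomem *ctrl;             /* hypothetical power control register */
};

#define to_my_pd(gpd)   container_of(gpd, struct my_pd, genpd)

static int my_pd_power_on(struct generic_pm_domain *genpd)
{
        struct my_pd *pd = to_my_pd(genpd);

        writel(1, pd->ctrl);            /* power the island up */
        return 0;
}

static int my_pd_power_off(struct generic_pm_domain *genpd)
{
        struct my_pd *pd = to_my_pd(genpd);

        /* A non-zero return keeps the domain on and bumps the state's
         * "rejected" counter (see genpd_power_off() below). */
        writel(0, pd->ctrl);
        return 0;
}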
596 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) in genpd_queue_power_off_work() argument
598 queue_work(pm_wq, &genpd->power_off_work); in genpd_queue_power_off_work()
613 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, in genpd_power_off() argument
626 if (!genpd_status_on(genpd) || genpd->prepared_count > 0) in genpd_power_off()
634 if (genpd_is_always_on(genpd) || in genpd_power_off()
635 genpd_is_rpm_always_on(genpd) || in genpd_power_off()
636 atomic_read(&genpd->sd_count) > 0) in genpd_power_off()
639 list_for_each_entry(pdd, &genpd->dev_list, list_node) { in genpd_power_off()
651 irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd)) in genpd_power_off()
658 if (genpd->gov && genpd->gov->power_down_ok) { in genpd_power_off()
659 if (!genpd->gov->power_down_ok(&genpd->domain)) in genpd_power_off()
664 if (!genpd->gov) in genpd_power_off()
665 genpd->state_idx = 0; in genpd_power_off()
668 if (atomic_read(&genpd->sd_count) > 0) in genpd_power_off()
671 ret = _genpd_power_off(genpd, true); in genpd_power_off()
673 genpd->states[genpd->state_idx].rejected++; in genpd_power_off()
677 genpd->status = GENPD_STATE_OFF; in genpd_power_off()
678 genpd_update_accounting(genpd); in genpd_power_off()
679 genpd->states[genpd->state_idx].usage++; in genpd_power_off()
681 list_for_each_entry(link, &genpd->child_links, child_node) { in genpd_power_off()
699 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) in genpd_power_on() argument
704 if (genpd_status_on(genpd)) in genpd_power_on()
712 list_for_each_entry(link, &genpd->child_links, child_node) { in genpd_power_on()
727 ret = _genpd_power_on(genpd, true); in genpd_power_on()
731 genpd->status = GENPD_STATE_ON; in genpd_power_on()
732 genpd_update_accounting(genpd); in genpd_power_on()
738 &genpd->child_links, in genpd_power_on()
751 struct generic_pm_domain *genpd = dev_to_genpd(dev); in genpd_dev_pm_start() local
753 return genpd_start_dev(genpd, dev); in genpd_dev_pm_start()
766 struct generic_pm_domain *genpd; in genpd_dev_pm_qos_notifier() local
775 genpd = dev_to_genpd(dev); in genpd_dev_pm_qos_notifier()
777 genpd = ERR_PTR(-ENODATA); in genpd_dev_pm_qos_notifier()
782 if (!IS_ERR(genpd)) { in genpd_dev_pm_qos_notifier()
783 genpd_lock(genpd); in genpd_dev_pm_qos_notifier()
784 genpd->max_off_time_changed = true; in genpd_dev_pm_qos_notifier()
785 genpd_unlock(genpd); in genpd_dev_pm_qos_notifier()
802 struct generic_pm_domain *genpd; in genpd_power_off_work_fn() local
804 genpd = container_of(work, struct generic_pm_domain, power_off_work); in genpd_power_off_work_fn()
806 genpd_lock(genpd); in genpd_power_off_work_fn()
807 genpd_power_off(genpd, false, 0); in genpd_power_off_work_fn()
808 genpd_unlock(genpd); in genpd_power_off_work_fn()
867 struct generic_pm_domain *genpd; in genpd_runtime_suspend() local
878 genpd = dev_to_genpd(dev); in genpd_runtime_suspend()
879 if (IS_ERR(genpd)) in genpd_runtime_suspend()
888 suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL; in genpd_runtime_suspend()
901 ret = genpd_stop_dev(genpd, dev); in genpd_runtime_suspend()
914 genpd->max_off_time_changed = true; in genpd_runtime_suspend()
923 if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) in genpd_runtime_suspend()
926 genpd_lock(genpd); in genpd_runtime_suspend()
928 genpd_power_off(genpd, true, 0); in genpd_runtime_suspend()
929 genpd_unlock(genpd); in genpd_runtime_suspend()
944 struct generic_pm_domain *genpd; in genpd_runtime_resume() local
955 genpd = dev_to_genpd(dev); in genpd_runtime_resume()
956 if (IS_ERR(genpd)) in genpd_runtime_resume()
963 if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) { in genpd_runtime_resume()
968 genpd_lock(genpd); in genpd_runtime_resume()
969 ret = genpd_power_on(genpd, 0); in genpd_runtime_resume()
972 genpd_unlock(genpd); in genpd_runtime_resume()
983 ret = genpd_start_dev(genpd, dev); in genpd_runtime_resume()
998 genpd->max_off_time_changed = true; in genpd_runtime_resume()
1006 genpd_stop_dev(genpd, dev); in genpd_runtime_resume()
1008 if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) { in genpd_runtime_resume()
1009 genpd_lock(genpd); in genpd_runtime_resume()
1011 genpd_power_off(genpd, true, 0); in genpd_runtime_resume()
1012 genpd_unlock(genpd); in genpd_runtime_resume()
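genpd_runtime_suspend() and genpd_runtime_resume() are installed as the domain's runtime PM callbacks by pm_genpd_init(), so a driver attached to a genpd only uses the ordinary runtime PM API. A short sketch; the driver and its transfer function are illustrative:

#include <linux/pm_runtime.h>

/* Sketch: the genpd callbacks above run behind these plain runtime PM
 * calls, powering the domain on for the access and queueing the
 * power-off work once the last device in the domain goes idle. */
static int my_drv_do_transfer(struct device *dev)
{
        int ret;

        ret = pm_runtime_resume_and_get(dev);   /* domain + device powered on */
        if (ret < 0)
                return ret;

        /* ... touch the hardware ... */

        pm_runtime_put(dev);                    /* may lead to genpd_power_off() */
        return 0;
}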
1031 struct generic_pm_domain *genpd; in genpd_power_off_unused() local
1040 list_for_each_entry(genpd, &gpd_list, gpd_list_node) in genpd_power_off_unused()
1041 genpd_queue_power_off_work(genpd); in genpd_power_off_unused()
1064 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock, in genpd_sync_power_off() argument
1069 if (!genpd_status_on(genpd) || genpd_is_always_on(genpd)) in genpd_sync_power_off()
1072 if (genpd->suspended_count != genpd->device_count in genpd_sync_power_off()
1073 || atomic_read(&genpd->sd_count) > 0) in genpd_sync_power_off()
1077 genpd->state_idx = genpd->state_count - 1; in genpd_sync_power_off()
1078 if (_genpd_power_off(genpd, false)) in genpd_sync_power_off()
1081 genpd->status = GENPD_STATE_OFF; in genpd_sync_power_off()
1083 list_for_each_entry(link, &genpd->child_links, child_node) { in genpd_sync_power_off()
1106 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock, in genpd_sync_power_on() argument
1111 if (genpd_status_on(genpd)) in genpd_sync_power_on()
1114 list_for_each_entry(link, &genpd->child_links, child_node) { in genpd_sync_power_on()
1126 _genpd_power_on(genpd, false); in genpd_sync_power_on()
1127 genpd->status = GENPD_STATE_ON; in genpd_sync_power_on()
1141 struct generic_pm_domain *genpd; in genpd_prepare() local
1146 genpd = dev_to_genpd(dev); in genpd_prepare()
1147 if (IS_ERR(genpd)) in genpd_prepare()
1150 genpd_lock(genpd); in genpd_prepare()
1152 if (genpd->prepared_count++ == 0) in genpd_prepare()
1153 genpd->suspended_count = 0; in genpd_prepare()
1155 genpd_unlock(genpd); in genpd_prepare()
1159 genpd_lock(genpd); in genpd_prepare()
1161 genpd->prepared_count--; in genpd_prepare()
1163 genpd_unlock(genpd); in genpd_prepare()
1181 struct generic_pm_domain *genpd; in genpd_finish_suspend() local
1184 genpd = dev_to_genpd(dev); in genpd_finish_suspend()
1185 if (IS_ERR(genpd)) in genpd_finish_suspend()
1195 if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd)) in genpd_finish_suspend()
1198 if (genpd->dev_ops.stop && genpd->dev_ops.start && in genpd_finish_suspend()
1200 ret = genpd_stop_dev(genpd, dev); in genpd_finish_suspend()
1210 genpd_lock(genpd); in genpd_finish_suspend()
1211 genpd->suspended_count++; in genpd_finish_suspend()
1212 genpd_sync_power_off(genpd, true, 0); in genpd_finish_suspend()
1213 genpd_unlock(genpd); in genpd_finish_suspend()
1240 struct generic_pm_domain *genpd; in genpd_resume_noirq() local
1245 genpd = dev_to_genpd(dev); in genpd_resume_noirq()
1246 if (IS_ERR(genpd)) in genpd_resume_noirq()
1249 if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd)) in genpd_resume_noirq()
1252 genpd_lock(genpd); in genpd_resume_noirq()
1253 genpd_sync_power_on(genpd, true, 0); in genpd_resume_noirq()
1254 genpd->suspended_count--; in genpd_resume_noirq()
1255 genpd_unlock(genpd); in genpd_resume_noirq()
1257 if (genpd->dev_ops.stop && genpd->dev_ops.start && in genpd_resume_noirq()
1259 ret = genpd_start_dev(genpd, dev); in genpd_resume_noirq()
1278 const struct generic_pm_domain *genpd; in genpd_freeze_noirq() local
1283 genpd = dev_to_genpd(dev); in genpd_freeze_noirq()
1284 if (IS_ERR(genpd)) in genpd_freeze_noirq()
1291 if (genpd->dev_ops.stop && genpd->dev_ops.start && in genpd_freeze_noirq()
1293 ret = genpd_stop_dev(genpd, dev); in genpd_freeze_noirq()
1307 const struct generic_pm_domain *genpd; in genpd_thaw_noirq() local
1312 genpd = dev_to_genpd(dev); in genpd_thaw_noirq()
1313 if (IS_ERR(genpd)) in genpd_thaw_noirq()
1316 if (genpd->dev_ops.stop && genpd->dev_ops.start && in genpd_thaw_noirq()
1318 ret = genpd_start_dev(genpd, dev); in genpd_thaw_noirq()
1350 struct generic_pm_domain *genpd; in genpd_restore_noirq() local
1355 genpd = dev_to_genpd(dev); in genpd_restore_noirq()
1356 if (IS_ERR(genpd)) in genpd_restore_noirq()
1363 genpd_lock(genpd); in genpd_restore_noirq()
1364 if (genpd->suspended_count++ == 0) { in genpd_restore_noirq()
1370 genpd->status = GENPD_STATE_OFF; in genpd_restore_noirq()
1373 genpd_sync_power_on(genpd, true, 0); in genpd_restore_noirq()
1374 genpd_unlock(genpd); in genpd_restore_noirq()
1376 if (genpd->dev_ops.stop && genpd->dev_ops.start && in genpd_restore_noirq()
1378 ret = genpd_start_dev(genpd, dev); in genpd_restore_noirq()
1397 struct generic_pm_domain *genpd; in genpd_complete() local
1401 genpd = dev_to_genpd(dev); in genpd_complete()
1402 if (IS_ERR(genpd)) in genpd_complete()
1407 genpd_lock(genpd); in genpd_complete()
1409 genpd->prepared_count--; in genpd_complete()
1410 if (!genpd->prepared_count) in genpd_complete()
1411 genpd_queue_power_off_work(genpd); in genpd_complete()
1413 genpd_unlock(genpd); in genpd_complete()
1418 struct generic_pm_domain *genpd; in genpd_switch_state() local
1421 genpd = dev_to_genpd_safe(dev); in genpd_switch_state()
1422 if (!genpd) in genpd_switch_state()
1425 use_lock = genpd_is_irq_safe(genpd); in genpd_switch_state()
1428 genpd_lock(genpd); in genpd_switch_state()
1431 genpd->suspended_count++; in genpd_switch_state()
1432 genpd_sync_power_off(genpd, use_lock, 0); in genpd_switch_state()
1434 genpd_sync_power_on(genpd, use_lock, 0); in genpd_switch_state()
1435 genpd->suspended_count--; in genpd_switch_state()
1439 genpd_unlock(genpd); in genpd_switch_state()
1539 static void genpd_update_cpumask(struct generic_pm_domain *genpd, in genpd_update_cpumask() argument
1544 if (!genpd_is_cpu_domain(genpd)) in genpd_update_cpumask()
1547 list_for_each_entry(link, &genpd->child_links, child_node) { in genpd_update_cpumask()
1556 cpumask_set_cpu(cpu, genpd->cpus); in genpd_update_cpumask()
1558 cpumask_clear_cpu(cpu, genpd->cpus); in genpd_update_cpumask()
1561 static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu) in genpd_set_cpumask() argument
1564 genpd_update_cpumask(genpd, cpu, true, 0); in genpd_set_cpumask()
1567 static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu) in genpd_clear_cpumask() argument
1570 genpd_update_cpumask(genpd, cpu, false, 0); in genpd_clear_cpumask()
1573 static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev) in genpd_get_cpu() argument
1577 if (!genpd_is_cpu_domain(genpd)) in genpd_get_cpu()
1588 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, in genpd_add_device() argument
1596 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) in genpd_add_device()
1603 gpd_data->cpu = genpd_get_cpu(genpd, base_dev); in genpd_add_device()
1605 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0; in genpd_add_device()
1609 genpd_lock(genpd); in genpd_add_device()
1611 genpd_set_cpumask(genpd, gpd_data->cpu); in genpd_add_device()
1612 dev_pm_domain_set(dev, &genpd->domain); in genpd_add_device()
1614 genpd->device_count++; in genpd_add_device()
1615 genpd->max_off_time_changed = true; in genpd_add_device()
1617 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); in genpd_add_device()
1619 genpd_unlock(genpd); in genpd_add_device()
1635 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) in pm_genpd_add_device() argument
1640 ret = genpd_add_device(genpd, dev, dev); in pm_genpd_add_device()
1647 static int genpd_remove_device(struct generic_pm_domain *genpd, in genpd_remove_device() argument
1661 genpd_lock(genpd); in genpd_remove_device()
1663 if (genpd->prepared_count > 0) { in genpd_remove_device()
1668 genpd->device_count--; in genpd_remove_device()
1669 genpd->max_off_time_changed = true; in genpd_remove_device()
1671 genpd_clear_cpumask(genpd, gpd_data->cpu); in genpd_remove_device()
1676 genpd_unlock(genpd); in genpd_remove_device()
1678 if (genpd->detach_dev) in genpd_remove_device()
1679 genpd->detach_dev(genpd, dev); in genpd_remove_device()
1686 genpd_unlock(genpd); in genpd_remove_device()
1698 struct generic_pm_domain *genpd = dev_to_genpd_safe(dev); in pm_genpd_remove_device() local
1700 if (!genpd) in pm_genpd_remove_device()
1703 return genpd_remove_device(genpd, dev); in pm_genpd_remove_device()
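pm_genpd_add_device() and pm_genpd_remove_device() are the non-DT way of tying a device to a domain. A short sketch; the domain and platform device are placeholders, and the domain must already have been registered with pm_genpd_init():

#include <linux/platform_device.h>
#include <linux/pm_domain.h>

/* Sketch: manual attach/detach of a device to an already-initialized
 * domain ("my_domain" is a placeholder). */
static struct generic_pm_domain my_domain = {
        .name = "my_domain",
};

static int my_board_attach(struct platform_device *pdev)
{
        return pm_genpd_add_device(&my_domain, &pdev->dev);
}

static void my_board_detach(struct platform_device *pdev)
{
        /* Rejected while the domain is prepared for system suspend
         * (prepared_count > 0 in genpd_remove_device() above). */
        pm_genpd_remove_device(&pdev->dev);
}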
1724 struct generic_pm_domain *genpd; in dev_pm_genpd_add_notifier() local
1728 genpd = dev_to_genpd_safe(dev); in dev_pm_genpd_add_notifier()
1729 if (!genpd) in dev_pm_genpd_add_notifier()
1740 genpd_lock(genpd); in dev_pm_genpd_add_notifier()
1741 ret = raw_notifier_chain_register(&genpd->power_notifiers, nb); in dev_pm_genpd_add_notifier()
1742 genpd_unlock(genpd); in dev_pm_genpd_add_notifier()
1746 genpd->name); in dev_pm_genpd_add_notifier()
1770 struct generic_pm_domain *genpd; in dev_pm_genpd_remove_notifier() local
1774 genpd = dev_to_genpd_safe(dev); in dev_pm_genpd_remove_notifier()
1775 if (!genpd) in dev_pm_genpd_remove_notifier()
1786 genpd_lock(genpd); in dev_pm_genpd_remove_notifier()
1787 ret = raw_notifier_chain_unregister(&genpd->power_notifiers, in dev_pm_genpd_remove_notifier()
1789 genpd_unlock(genpd); in dev_pm_genpd_remove_notifier()
1793 genpd->name); in dev_pm_genpd_remove_notifier()
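dev_pm_genpd_add_notifier() registers against the power_notifiers chain that _genpd_power_on()/_genpd_power_off() fire. A minimal sketch of a consumer reacting to the GENPD_NOTIFY_ON/GENPD_NOTIFY_OFF events seen above; the callback bodies are illustrative:

#include <linux/notifier.h>
#include <linux/pm_domain.h>

/* Sketch: react to power transitions of the domain this device sits in. */
static int my_genpd_notify(struct notifier_block *nb, unsigned long action,
                           void *data)
{
        switch (action) {
        case GENPD_NOTIFY_ON:
                /* Domain was just powered on: restore lost context, etc. */
                break;
        case GENPD_NOTIFY_OFF:
                /* Domain was just powered off: hardware state is gone. */
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block my_genpd_nb = {
        .notifier_call = my_genpd_notify,
};

/* In probe (after the device is attached to its domain):
 *      dev_pm_genpd_add_notifier(dev, &my_genpd_nb);
 * In remove:
 *      dev_pm_genpd_remove_notifier(dev);
 */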
1802 static int genpd_add_subdomain(struct generic_pm_domain *genpd, in genpd_add_subdomain() argument
1808 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) in genpd_add_subdomain()
1809 || genpd == subdomain) in genpd_add_subdomain()
1817 if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) { in genpd_add_subdomain()
1819 genpd->name, subdomain->name); in genpd_add_subdomain()
1828 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); in genpd_add_subdomain()
1830 if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) { in genpd_add_subdomain()
1835 list_for_each_entry(itr, &genpd->parent_links, parent_node) { in genpd_add_subdomain()
1836 if (itr->child == subdomain && itr->parent == genpd) { in genpd_add_subdomain()
1842 link->parent = genpd; in genpd_add_subdomain()
1843 list_add_tail(&link->parent_node, &genpd->parent_links); in genpd_add_subdomain()
1847 genpd_sd_counter_inc(genpd); in genpd_add_subdomain()
1850 genpd_unlock(genpd); in genpd_add_subdomain()
1862 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, in pm_genpd_add_subdomain() argument
1868 ret = genpd_add_subdomain(genpd, subdomain); in pm_genpd_add_subdomain()
1880 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, in pm_genpd_remove_subdomain() argument
1886 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) in pm_genpd_remove_subdomain()
1890 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); in pm_genpd_remove_subdomain()
1894 genpd->name, subdomain->name); in pm_genpd_remove_subdomain()
1899 list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) { in pm_genpd_remove_subdomain()
1907 genpd_sd_counter_dec(genpd); in pm_genpd_remove_subdomain()
1914 genpd_unlock(genpd); in pm_genpd_remove_subdomain()
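pm_genpd_add_subdomain() links a child domain under a parent so the parent is kept on while any child is on (tracked via sd_count above); pm_genpd_remove_subdomain() undoes the link. A short sketch; both domains are assumed to have been registered with pm_genpd_init() already:

/* Sketch: nesting one domain under another. */
static int my_soc_link_domains(struct generic_pm_domain *parent,
                               struct generic_pm_domain *child)
{
        int ret;

        ret = pm_genpd_add_subdomain(parent, child);
        if (ret)
                return ret;

        /* Later, before removing either domain:
         *      pm_genpd_remove_subdomain(parent, child);
         */
        return 0;
}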
1927 static int genpd_set_default_power_state(struct generic_pm_domain *genpd) in genpd_set_default_power_state() argument
1935 genpd->states = state; in genpd_set_default_power_state()
1936 genpd->state_count = 1; in genpd_set_default_power_state()
1937 genpd->free_states = genpd_free_default_power_state; in genpd_set_default_power_state()
1942 static void genpd_lock_init(struct generic_pm_domain *genpd) in genpd_lock_init() argument
1944 if (genpd->flags & GENPD_FLAG_IRQ_SAFE) { in genpd_lock_init()
1945 spin_lock_init(&genpd->slock); in genpd_lock_init()
1946 genpd->lock_ops = &genpd_spin_ops; in genpd_lock_init()
1948 mutex_init(&genpd->mlock); in genpd_lock_init()
1949 genpd->lock_ops = &genpd_mtx_ops; in genpd_lock_init()
1961 int pm_genpd_init(struct generic_pm_domain *genpd, in pm_genpd_init() argument
1966 if (IS_ERR_OR_NULL(genpd)) in pm_genpd_init()
1969 INIT_LIST_HEAD(&genpd->parent_links); in pm_genpd_init()
1970 INIT_LIST_HEAD(&genpd->child_links); in pm_genpd_init()
1971 INIT_LIST_HEAD(&genpd->dev_list); in pm_genpd_init()
1972 RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers); in pm_genpd_init()
1973 genpd_lock_init(genpd); in pm_genpd_init()
1974 genpd->gov = gov; in pm_genpd_init()
1975 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); in pm_genpd_init()
1976 atomic_set(&genpd->sd_count, 0); in pm_genpd_init()
1977 genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON; in pm_genpd_init()
1978 genpd->device_count = 0; in pm_genpd_init()
1979 genpd->max_off_time_ns = -1; in pm_genpd_init()
1980 genpd->max_off_time_changed = true; in pm_genpd_init()
1981 genpd->next_wakeup = KTIME_MAX; in pm_genpd_init()
1982 genpd->provider = NULL; in pm_genpd_init()
1983 genpd->has_provider = false; in pm_genpd_init()
1984 genpd->accounting_time = ktime_get(); in pm_genpd_init()
1985 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; in pm_genpd_init()
1986 genpd->domain.ops.runtime_resume = genpd_runtime_resume; in pm_genpd_init()
1987 genpd->domain.ops.prepare = genpd_prepare; in pm_genpd_init()
1988 genpd->domain.ops.suspend_noirq = genpd_suspend_noirq; in pm_genpd_init()
1989 genpd->domain.ops.resume_noirq = genpd_resume_noirq; in pm_genpd_init()
1990 genpd->domain.ops.freeze_noirq = genpd_freeze_noirq; in pm_genpd_init()
1991 genpd->domain.ops.thaw_noirq = genpd_thaw_noirq; in pm_genpd_init()
1992 genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq; in pm_genpd_init()
1993 genpd->domain.ops.restore_noirq = genpd_restore_noirq; in pm_genpd_init()
1994 genpd->domain.ops.complete = genpd_complete; in pm_genpd_init()
1995 genpd->domain.start = genpd_dev_pm_start; in pm_genpd_init()
1997 if (genpd->flags & GENPD_FLAG_PM_CLK) { in pm_genpd_init()
1998 genpd->dev_ops.stop = pm_clk_suspend; in pm_genpd_init()
1999 genpd->dev_ops.start = pm_clk_resume; in pm_genpd_init()
2003 if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) && in pm_genpd_init()
2004 !genpd_status_on(genpd)) in pm_genpd_init()
2007 if (genpd_is_cpu_domain(genpd) && in pm_genpd_init()
2008 !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL)) in pm_genpd_init()
2012 if (genpd->state_count == 0) { in pm_genpd_init()
2013 ret = genpd_set_default_power_state(genpd); in pm_genpd_init()
2015 if (genpd_is_cpu_domain(genpd)) in pm_genpd_init()
2016 free_cpumask_var(genpd->cpus); in pm_genpd_init()
2019 } else if (!gov && genpd->state_count > 1) { in pm_genpd_init()
2020 pr_warn("%s: no governor for states\n", genpd->name); in pm_genpd_init()
2023 device_initialize(&genpd->dev); in pm_genpd_init()
2024 dev_set_name(&genpd->dev, "%s", genpd->name); in pm_genpd_init()
2027 list_add(&genpd->gpd_list_node, &gpd_list); in pm_genpd_init()
2029 genpd_debug_add(genpd); in pm_genpd_init()
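pm_genpd_init() fills in the runtime and system-sleep callbacks, picks the lock type from the flags and adds the domain to gpd_list. A provider-side sketch building on the struct my_pd from the earlier power-callback example; the flag choice and governor are illustrative (simple_qos_governor is the QoS governor exported by the genpd code, and NULL is also valid for single-state domains):

/* Sketch: register the domain, initially powered off. With
 * GENPD_FLAG_PM_CLK the dev_ops.stop/start hooks become
 * pm_clk_suspend()/pm_clk_resume(), as set up in pm_genpd_init(). */
static int my_pd_setup(struct my_pd *pd)
{
        pd->genpd.name = "my_pd";
        pd->genpd.power_on = my_pd_power_on;
        pd->genpd.power_off = my_pd_power_off;
        pd->genpd.flags = GENPD_FLAG_PM_CLK;

        return pm_genpd_init(&pd->genpd, &simple_qos_governor, true);
}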
2035 static int genpd_remove(struct generic_pm_domain *genpd) in genpd_remove() argument
2039 if (IS_ERR_OR_NULL(genpd)) in genpd_remove()
2042 genpd_lock(genpd); in genpd_remove()
2044 if (genpd->has_provider) { in genpd_remove()
2045 genpd_unlock(genpd); in genpd_remove()
2046 pr_err("Provider present, unable to remove %s\n", genpd->name); in genpd_remove()
2050 if (!list_empty(&genpd->parent_links) || genpd->device_count) { in genpd_remove()
2051 genpd_unlock(genpd); in genpd_remove()
2052 pr_err("%s: unable to remove %s\n", __func__, genpd->name); in genpd_remove()
2056 list_for_each_entry_safe(link, l, &genpd->child_links, child_node) { in genpd_remove()
2062 list_del(&genpd->gpd_list_node); in genpd_remove()
2063 genpd_unlock(genpd); in genpd_remove()
2064 genpd_debug_remove(genpd); in genpd_remove()
2065 cancel_work_sync(&genpd->power_off_work); in genpd_remove()
2066 if (genpd_is_cpu_domain(genpd)) in genpd_remove()
2067 free_cpumask_var(genpd->cpus); in genpd_remove()
2068 if (genpd->free_states) in genpd_remove()
2069 genpd->free_states(genpd->states, genpd->state_count); in genpd_remove()
2071 pr_debug("%s: removed %s\n", __func__, genpd->name); in genpd_remove()
2089 int pm_genpd_remove(struct generic_pm_domain *genpd) in pm_genpd_remove() argument
2094 ret = genpd_remove(genpd); in pm_genpd_remove()
2214 static bool genpd_present(const struct generic_pm_domain *genpd) in genpd_present() argument
2221 if (gpd == genpd) { in genpd_present()
2237 struct generic_pm_domain *genpd) in of_genpd_add_provider_simple() argument
2241 if (!np || !genpd) in of_genpd_add_provider_simple()
2244 if (!genpd_present(genpd)) in of_genpd_add_provider_simple()
2247 genpd->dev.of_node = np; in of_genpd_add_provider_simple()
2250 if (genpd->set_performance_state) { in of_genpd_add_provider_simple()
2251 ret = dev_pm_opp_of_add_table(&genpd->dev); in of_genpd_add_provider_simple()
2254 dev_err(&genpd->dev, "Failed to add OPP table: %d\n", in of_genpd_add_provider_simple()
2263 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); in of_genpd_add_provider_simple()
2264 WARN_ON(IS_ERR(genpd->opp_table)); in of_genpd_add_provider_simple()
2267 ret = genpd_add_provider(np, genpd_xlate_simple, genpd); in of_genpd_add_provider_simple()
2269 if (genpd->set_performance_state) { in of_genpd_add_provider_simple()
2270 dev_pm_opp_put_opp_table(genpd->opp_table); in of_genpd_add_provider_simple()
2271 dev_pm_opp_of_remove_table(&genpd->dev); in of_genpd_add_provider_simple()
2277 genpd->provider = &np->fwnode; in of_genpd_add_provider_simple()
2278 genpd->has_provider = true; in of_genpd_add_provider_simple()
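of_genpd_add_provider_simple() exposes a single registered domain to DT consumers and, when ->set_performance_state is set, also hooks up the domain's OPP table as shown above. A probe-time sketch continuing the my_pd example; register mapping and cleanup are omitted for brevity:

#include <linux/of.h>
#include <linux/platform_device.h>

/* Sketch: register the domain and publish it as a DT power-domain
 * provider; consumers then reference it via "power-domains = <&...>". */
static int my_pd_probe(struct platform_device *pdev)
{
        struct my_pd *pd;
        int ret;

        pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return -ENOMEM;

        /* mapping of pd->ctrl omitted */

        ret = my_pd_setup(pd);          /* pm_genpd_init(), see above */
        if (ret)
                return ret;

        return of_genpd_add_provider_simple(pdev->dev.of_node, &pd->genpd);
}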
2292 struct generic_pm_domain *genpd; in of_genpd_add_provider_onecell() local
2303 genpd = data->domains[i]; in of_genpd_add_provider_onecell()
2305 if (!genpd) in of_genpd_add_provider_onecell()
2307 if (!genpd_present(genpd)) in of_genpd_add_provider_onecell()
2310 genpd->dev.of_node = np; in of_genpd_add_provider_onecell()
2313 if (genpd->set_performance_state) { in of_genpd_add_provider_onecell()
2314 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i); in of_genpd_add_provider_onecell()
2317 dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n", in of_genpd_add_provider_onecell()
2326 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); in of_genpd_add_provider_onecell()
2327 WARN_ON(IS_ERR(genpd->opp_table)); in of_genpd_add_provider_onecell()
2330 genpd->provider = &np->fwnode; in of_genpd_add_provider_onecell()
2331 genpd->has_provider = true; in of_genpd_add_provider_onecell()
2342 genpd = data->domains[i]; in of_genpd_add_provider_onecell()
2344 if (!genpd) in of_genpd_add_provider_onecell()
2347 genpd->provider = NULL; in of_genpd_add_provider_onecell()
2348 genpd->has_provider = false; in of_genpd_add_provider_onecell()
2350 if (genpd->set_performance_state) { in of_genpd_add_provider_onecell()
2351 dev_pm_opp_put_opp_table(genpd->opp_table); in of_genpd_add_provider_onecell()
2352 dev_pm_opp_of_remove_table(&genpd->dev); in of_genpd_add_provider_onecell()
2416 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); in genpd_get_from_provider() local
2427 genpd = provider->xlate(genpdspec, provider->data); in genpd_get_from_provider()
2428 if (!IS_ERR(genpd)) in genpd_get_from_provider()
2434 return genpd; in genpd_get_from_provider()
2447 struct generic_pm_domain *genpd; in of_genpd_add_device() local
2452 genpd = genpd_get_from_provider(genpdspec); in of_genpd_add_device()
2453 if (IS_ERR(genpd)) { in of_genpd_add_device()
2454 ret = PTR_ERR(genpd); in of_genpd_add_device()
2458 ret = genpd_add_device(genpd, dev, dev); in of_genpd_add_device()
2558 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT); in of_genpd_remove_last() local
2568 genpd = ret ? ERR_PTR(ret) : gpd; in of_genpd_remove_last()
2574 return genpd; in of_genpd_remove_last()
2969 struct generic_pm_domain *genpd = NULL; in pm_genpd_opp_to_performance_state() local
2972 genpd = container_of(genpd_dev, struct generic_pm_domain, dev); in pm_genpd_opp_to_performance_state()
2974 if (unlikely(!genpd->opp_to_performance_state)) in pm_genpd_opp_to_performance_state()
2977 genpd_lock(genpd); in pm_genpd_opp_to_performance_state()
2978 state = genpd->opp_to_performance_state(genpd, opp); in pm_genpd_opp_to_performance_state()
2979 genpd_unlock(genpd); in pm_genpd_opp_to_performance_state()
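pm_genpd_opp_to_performance_state() and _genpd_set_performance_state() call back into the provider through ->opp_to_performance_state and ->set_performance_state. A sketch of the two hooks, again building on struct my_pd; mapping the OPP's opp-level directly to a performance state is one common pattern, and the register write is hypothetical:

#include <linux/pm_opp.h>

/* Sketch: provider hooks for performance states. */
static unsigned int my_pd_opp_to_performance_state(struct generic_pm_domain *genpd,
                                                   struct dev_pm_opp *opp)
{
        return dev_pm_opp_get_level(opp);
}

static int my_pd_set_performance_state(struct generic_pm_domain *genpd,
                                       unsigned int state)
{
        struct my_pd *pd = to_my_pd(genpd);

        writel(state, pd->ctrl + 0x4);  /* hypothetical level register */
        return 0;
}

/* Wired up before pm_genpd_init():
 *      pd->genpd.opp_to_performance_state = my_pd_opp_to_performance_state;
 *      pd->genpd.set_performance_state = my_pd_set_performance_state;
 */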
3032 struct generic_pm_domain *genpd) in genpd_summary_one() argument
3044 ret = genpd_lock_interruptible(genpd); in genpd_summary_one()
3048 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) in genpd_summary_one()
3050 if (!genpd_status_on(genpd)) in genpd_summary_one()
3052 status_lookup[genpd->status], genpd->state_idx); in genpd_summary_one()
3055 status_lookup[genpd->status]); in genpd_summary_one()
3056 seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state); in genpd_summary_one()
3063 list_for_each_entry(link, &genpd->parent_links, parent_node) { in genpd_summary_one()
3064 if (list_is_first(&link->parent_node, &genpd->parent_links)) in genpd_summary_one()
3067 if (!list_is_last(&link->parent_node, &genpd->parent_links)) in genpd_summary_one()
3071 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { in genpd_summary_one()
3073 genpd_is_irq_safe(genpd) ? in genpd_summary_one()
3086 genpd_unlock(genpd); in genpd_summary_one()
3093 struct generic_pm_domain *genpd; in summary_show() local
3104 list_for_each_entry(genpd, &gpd_list, gpd_list_node) { in summary_show()
3105 ret = genpd_summary_one(s, genpd); in summary_show()
3121 struct generic_pm_domain *genpd = s->private; in status_show() local
3124 ret = genpd_lock_interruptible(genpd); in status_show()
3128 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup))) in status_show()
3131 if (genpd->status == GENPD_STATE_OFF) in status_show()
3132 seq_printf(s, "%s-%u\n", status_lookup[genpd->status], in status_show()
3133 genpd->state_idx); in status_show()
3135 seq_printf(s, "%s\n", status_lookup[genpd->status]); in status_show()
3137 genpd_unlock(genpd); in status_show()
3143 struct generic_pm_domain *genpd = s->private; in sub_domains_show() local
3147 ret = genpd_lock_interruptible(genpd); in sub_domains_show()
3151 list_for_each_entry(link, &genpd->parent_links, parent_node) in sub_domains_show()
3154 genpd_unlock(genpd); in sub_domains_show()
3160 struct generic_pm_domain *genpd = s->private; in idle_states_show() local
3164 ret = genpd_lock_interruptible(genpd); in idle_states_show()
3170 for (i = 0; i < genpd->state_count; i++) { in idle_states_show()
3174 if ((genpd->status == GENPD_STATE_OFF) && in idle_states_show()
3175 (genpd->state_idx == i)) in idle_states_show()
3176 delta = ktime_sub(ktime_get(), genpd->accounting_time); in idle_states_show()
3179 ktime_add(genpd->states[i].idle_time, delta)); in idle_states_show()
3181 genpd->states[i].usage, genpd->states[i].rejected); in idle_states_show()
3184 genpd_unlock(genpd); in idle_states_show()
3190 struct generic_pm_domain *genpd = s->private; in active_time_show() local
3194 ret = genpd_lock_interruptible(genpd); in active_time_show()
3198 if (genpd->status == GENPD_STATE_ON) in active_time_show()
3199 delta = ktime_sub(ktime_get(), genpd->accounting_time); in active_time_show()
3202 ktime_add(genpd->on_time, delta))); in active_time_show()
3204 genpd_unlock(genpd); in active_time_show()
3210 struct generic_pm_domain *genpd = s->private; in total_idle_time_show() local
3215 ret = genpd_lock_interruptible(genpd); in total_idle_time_show()
3219 for (i = 0; i < genpd->state_count; i++) { in total_idle_time_show()
3221 if ((genpd->status == GENPD_STATE_OFF) && in total_idle_time_show()
3222 (genpd->state_idx == i)) in total_idle_time_show()
3223 delta = ktime_sub(ktime_get(), genpd->accounting_time); in total_idle_time_show()
3225 total = ktime_add(total, genpd->states[i].idle_time); in total_idle_time_show()
3231 genpd_unlock(genpd); in total_idle_time_show()
3238 struct generic_pm_domain *genpd = s->private; in devices_show() local
3243 ret = genpd_lock_interruptible(genpd); in devices_show()
3247 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { in devices_show()
3249 genpd_is_irq_safe(genpd) ? in devices_show()
3258 genpd_unlock(genpd); in devices_show()
3264 struct generic_pm_domain *genpd = s->private; in perf_state_show() local
3266 if (genpd_lock_interruptible(genpd)) in perf_state_show()
3269 seq_printf(s, "%u\n", genpd->performance_state); in perf_state_show()
3271 genpd_unlock(genpd); in perf_state_show()
3284 static void genpd_debug_add(struct generic_pm_domain *genpd) in genpd_debug_add() argument
3291 d = debugfs_create_dir(genpd->name, genpd_debugfs_dir); in genpd_debug_add()
3294 d, genpd, &status_fops); in genpd_debug_add()
3296 d, genpd, &sub_domains_fops); in genpd_debug_add()
3298 d, genpd, &idle_states_fops); in genpd_debug_add()
3300 d, genpd, &active_time_fops); in genpd_debug_add()
3302 d, genpd, &total_idle_time_fops); in genpd_debug_add()
3304 d, genpd, &devices_fops); in genpd_debug_add()
3305 if (genpd->set_performance_state) in genpd_debug_add()
3307 d, genpd, &perf_state_fops); in genpd_debug_add()
3312 struct generic_pm_domain *genpd; in genpd_debug_init() local
3319 list_for_each_entry(genpd, &gpd_list, gpd_list_node) in genpd_debug_init()
3320 genpd_debug_add(genpd); in genpd_debug_init()