Lines matching references to "dev" in drivers/base/power/main.c, the Linux kernel's system-wide device power management core. Each entry shows the line number within that file, the matching source line, and the enclosing function reported by the cross-referencer.
99 void device_pm_sleep_init(struct device *dev) in device_pm_sleep_init() argument
101 dev->power.is_prepared = false; in device_pm_sleep_init()
102 dev->power.is_suspended = false; in device_pm_sleep_init()
103 dev->power.is_noirq_suspended = false; in device_pm_sleep_init()
104 dev->power.is_late_suspended = false; in device_pm_sleep_init()
105 init_completion(&dev->power.completion); in device_pm_sleep_init()
106 complete_all(&dev->power.completion); in device_pm_sleep_init()
107 dev->power.wakeup = NULL; in device_pm_sleep_init()
108 INIT_LIST_HEAD(&dev->power.entry); in device_pm_sleep_init()
131 void device_pm_add(struct device *dev) in device_pm_add() argument
134 if (device_pm_not_required(dev)) in device_pm_add()
138 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); in device_pm_add()
139 device_pm_check_callbacks(dev); in device_pm_add()
141 if (dev->parent && dev->parent->power.is_prepared) in device_pm_add()
142 dev_warn(dev, "parent %s should not be sleeping\n", in device_pm_add()
143 dev_name(dev->parent)); in device_pm_add()
144 list_add_tail(&dev->power.entry, &dpm_list); in device_pm_add()
145 dev->power.in_dpm_list = true; in device_pm_add()
153 void device_pm_remove(struct device *dev) in device_pm_remove() argument
155 if (device_pm_not_required(dev)) in device_pm_remove()
159 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); in device_pm_remove()
160 complete_all(&dev->power.completion); in device_pm_remove()
162 list_del_init(&dev->power.entry); in device_pm_remove()
163 dev->power.in_dpm_list = false; in device_pm_remove()
165 device_wakeup_disable(dev); in device_pm_remove()
166 pm_runtime_remove(dev); in device_pm_remove()
167 device_pm_check_callbacks(dev); in device_pm_remove()
202 void device_pm_move_last(struct device *dev) in device_pm_move_last() argument
205 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); in device_pm_move_last()
206 list_move_tail(&dev->power.entry, &dpm_list); in device_pm_move_last()
209 static ktime_t initcall_debug_start(struct device *dev, void *cb) in initcall_debug_start() argument
214 dev_info(dev, "calling %pS @ %i, parent: %s\n", cb, in initcall_debug_start()
216 dev->parent ? dev_name(dev->parent) : "none"); in initcall_debug_start()
220 static void initcall_debug_report(struct device *dev, ktime_t calltime, in initcall_debug_report() argument
229 dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error, in initcall_debug_report()
238 static void dpm_wait(struct device *dev, bool async) in dpm_wait() argument
240 if (!dev) in dpm_wait()
243 if (async || (pm_async_enabled && dev->power.async_suspend)) in dpm_wait()
244 wait_for_completion(&dev->power.completion); in dpm_wait()
247 static int dpm_wait_fn(struct device *dev, void *async_ptr) in dpm_wait_fn() argument
249 dpm_wait(dev, *((bool *)async_ptr)); in dpm_wait_fn()
253 static void dpm_wait_for_children(struct device *dev, bool async) in dpm_wait_for_children() argument
255 device_for_each_child(dev, &async, dpm_wait_fn); in dpm_wait_for_children()
258 static void dpm_wait_for_suppliers(struct device *dev, bool async) in dpm_wait_for_suppliers() argument
272 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) in dpm_wait_for_suppliers()
279 static bool dpm_wait_for_superior(struct device *dev, bool async) in dpm_wait_for_superior() argument
292 if (!device_pm_initialized(dev)) { in dpm_wait_for_superior()
297 parent = get_device(dev->parent); in dpm_wait_for_superior()
304 dpm_wait_for_suppliers(dev, async); in dpm_wait_for_superior()
310 return device_pm_initialized(dev); in dpm_wait_for_superior()
313 static void dpm_wait_for_consumers(struct device *dev, bool async) in dpm_wait_for_consumers() argument
329 list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node) in dpm_wait_for_consumers()
336 static void dpm_wait_for_subordinate(struct device *dev, bool async) in dpm_wait_for_subordinate() argument
338 dpm_wait_for_children(dev, async); in dpm_wait_for_subordinate()
339 dpm_wait_for_consumers(dev, async); in dpm_wait_for_subordinate()
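Taken together, these helpers encode the PM core's ordering rule: before a device suspends, dpm_wait_for_subordinate() waits for everything "below" it (children and device-link consumers), and before it resumes, dpm_wait_for_superior() waits for everything "above" it (parent and suppliers). A hypothetical userspace model of that rule, reduced to a single two-node chain:

    #include <stdio.h>

    /* Toy model: suspend flows leaves-to-root, resume root-to-leaves. */
    struct node {
            const char *name;
            struct node *child;     /* one child keeps the model small */
    };

    static void suspend_node(struct node *n)
    {
            if (n->child)
                    suspend_node(n->child);  /* subordinates go first */
            printf("suspend %s\n", n->name);
    }

    static void resume_node(struct node *n)
    {
            printf("resume %s\n", n->name);  /* superiors go first */
            if (n->child)
                    resume_node(n->child);
    }

    int main(void)
    {
            struct node leaf = { "consumer", NULL };
            struct node root = { "supplier", &leaf };

            suspend_node(&root);    /* suspend consumer, then supplier */
            resume_node(&root);     /* resume supplier, then consumer */
            return 0;
    }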
441 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info) in pm_dev_dbg() argument
443 dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event), in pm_dev_dbg()
444 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? in pm_dev_dbg()
445 ", may wakeup" : "", dev->power.driver_flags); in pm_dev_dbg()
448 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info, in pm_dev_err() argument
451 dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info, in pm_dev_err()
475 static int dpm_run_callback(pm_callback_t cb, struct device *dev, in dpm_run_callback() argument
484 calltime = initcall_debug_start(dev, cb); in dpm_run_callback()
486 pm_dev_dbg(dev, state, info); in dpm_run_callback()
487 trace_device_pm_callback_start(dev, info, state.event); in dpm_run_callback()
488 error = cb(dev); in dpm_run_callback()
489 trace_device_pm_callback_end(dev, error); in dpm_run_callback()
492 initcall_debug_report(dev, calltime, cb, error); in dpm_run_callback()
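Every phase funnels its chosen callback through dpm_run_callback(), which centralizes the initcall-debug timing, the tracepoints, and error reporting; a NULL callback (no handler at the matched level) counts as success. A sketch of the whole helper, with the lines this listing elides reconstructed from memory of mainline (details may differ between kernel versions):

    static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                                pm_message_t state, const char *info)
    {
            ktime_t calltime;
            int error;

            if (!cb)
                    return 0;       /* no callback at this level is fine */

            calltime = initcall_debug_start(dev, cb);

            pm_dev_dbg(dev, state, info);
            trace_device_pm_callback_start(dev, info, state.event);
            error = cb(dev);
            trace_device_pm_callback_end(dev, error);
            suspend_report_result(dev, cb, error);

            initcall_debug_report(dev, calltime, cb, error);

            return error;
    }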
499 struct device *dev; member
519 dev_emerg(wd->dev, "**** DPM device timeout ****\n"); in dpm_watchdog_handler()
522 dev_driver_string(wd->dev), dev_name(wd->dev)); in dpm_watchdog_handler()
530 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev) in dpm_watchdog_set() argument
534 wd->dev = dev; in dpm_watchdog_set()
572 bool dev_pm_skip_resume(struct device *dev) in dev_pm_skip_resume() argument
578 return dev_pm_skip_suspend(dev); in dev_pm_skip_resume()
580 return !dev->power.must_resume; in dev_pm_skip_resume()
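dev_pm_skip_resume() answers a different question depending on the transition: during THAW it mirrors dev_pm_skip_suspend(), during RESTORE callbacks are never skipped, and during an ordinary RESUME it is simply the negation of must_resume. A reconstruction including the branches this listing elides (from memory of mainline):

    bool dev_pm_skip_resume(struct device *dev)
    {
            if (pm_transition.event == PM_EVENT_RESTORE)
                    return false;   /* restore from hibernation: never skip */

            if (pm_transition.event == PM_EVENT_THAW)
                    return dev_pm_skip_suspend(dev);

            return !dev->power.must_resume;
    }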
592 static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async) in __device_resume_noirq() argument
599 TRACE_DEVICE(dev); in __device_resume_noirq()
602 if (dev->power.syscore || dev->power.direct_complete) in __device_resume_noirq()
605 if (!dev->power.is_noirq_suspended) in __device_resume_noirq()
608 if (!dpm_wait_for_superior(dev, async)) in __device_resume_noirq()
611 skip_resume = dev_pm_skip_resume(dev); in __device_resume_noirq()
623 pm_runtime_set_suspended(dev); in __device_resume_noirq()
624 else if (dev_pm_skip_suspend(dev)) in __device_resume_noirq()
625 pm_runtime_set_active(dev); in __device_resume_noirq()
627 if (dev->pm_domain) { in __device_resume_noirq()
629 callback = pm_noirq_op(&dev->pm_domain->ops, state); in __device_resume_noirq()
630 } else if (dev->type && dev->type->pm) { in __device_resume_noirq()
632 callback = pm_noirq_op(dev->type->pm, state); in __device_resume_noirq()
633 } else if (dev->class && dev->class->pm) { in __device_resume_noirq()
635 callback = pm_noirq_op(dev->class->pm, state); in __device_resume_noirq()
636 } else if (dev->bus && dev->bus->pm) { in __device_resume_noirq()
638 callback = pm_noirq_op(dev->bus->pm, state); in __device_resume_noirq()
646 if (dev->driver && dev->driver->pm) { in __device_resume_noirq()
648 callback = pm_noirq_op(dev->driver->pm, state); in __device_resume_noirq()
652 error = dpm_run_callback(callback, dev, state, info); in __device_resume_noirq()
655 dev->power.is_noirq_suspended = false; in __device_resume_noirq()
658 complete_all(&dev->power.completion); in __device_resume_noirq()
664 dpm_save_failed_dev(dev_name(dev)); in __device_resume_noirq()
665 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error); in __device_resume_noirq()
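The callback lookup above follows a fixed precedence: a PM domain overrides the device type, which overrides the class, which overrides the bus; the driver's own dev_pm_ops is consulted only if none of those levels produced a callback for this phase. A hypothetical, self-contained userspace model of that precedence (all names invented):

    #include <stdio.h>
    #include <stddef.h>

    typedef int (*pm_cb)(void);

    struct pm_table { pm_cb resume; };      /* models struct dev_pm_ops */

    struct dev_model {
            const struct pm_table *domain, *type, *class_, *bus, *driver;
    };

    static pm_cb pick_callback(const struct dev_model *d)
    {
            pm_cb cb = NULL;

            /* The first level that provides an ops table wins, even if
             * that table lacks a callback for this particular phase. */
            if (d->domain)
                    cb = d->domain->resume;
            else if (d->type)
                    cb = d->type->resume;
            else if (d->class_)
                    cb = d->class_->resume;
            else if (d->bus)
                    cb = d->bus->resume;

            /* The driver is only a fallback when no callback was found. */
            if (!cb && d->driver)
                    cb = d->driver->resume;

            return cb;
    }

    static int driver_resume(void) { puts("driver resume"); return 0; }

    int main(void)
    {
            /* The bus supplies an ops table without a resume op, so the
             * driver fallback runs. */
            static const struct pm_table bus = { .resume = NULL };
            static const struct pm_table drv = { .resume = driver_resume };
            struct dev_model d = { .bus = &bus, .driver = &drv };
            pm_cb cb = pick_callback(&d);

            return cb ? cb() : 0;   /* prints "driver resume" */
    }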
669 static bool is_async(struct device *dev) in is_async() argument
671 return dev->power.async_suspend && pm_async_enabled in is_async()
675 static bool dpm_async_fn(struct device *dev, async_func_t func) in dpm_async_fn() argument
677 reinit_completion(&dev->power.completion); in dpm_async_fn()
679 if (!is_async(dev)) in dpm_async_fn()
682 get_device(dev); in dpm_async_fn()
684 if (async_schedule_dev_nocall(func, dev)) in dpm_async_fn()
687 put_device(dev); in dpm_async_fn()
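dpm_async_fn() is the common gate for all async phases: it reinitializes the completion whether or not the device ends up async, takes a reference so the device cannot go away under the async thread, and drops that reference again if scheduling fails so the caller falls back to the synchronous path. The full helper as the listed lines imply it, with the short elided lines filled in from memory of mainline:

    static bool dpm_async_fn(struct device *dev, async_func_t func)
    {
            reinit_completion(&dev->power.completion);

            if (!is_async(dev))
                    return false;

            get_device(dev);        /* pin @dev for the async thread */

            if (async_schedule_dev_nocall(func, dev))
                    return true;

            put_device(dev);        /* scheduling failed: caller runs sync */

            return false;
    }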
694 struct device *dev = data; in async_resume_noirq() local
696 __device_resume_noirq(dev, pm_transition, true); in async_resume_noirq()
697 put_device(dev); in async_resume_noirq()
700 static void device_resume_noirq(struct device *dev) in device_resume_noirq() argument
702 if (dpm_async_fn(dev, async_resume_noirq)) in device_resume_noirq()
705 __device_resume_noirq(dev, pm_transition, false); in device_resume_noirq()
710 struct device *dev; in dpm_noirq_resume_devices() local
718 dev = to_device(dpm_noirq_list.next); in dpm_noirq_resume_devices()
719 get_device(dev); in dpm_noirq_resume_devices()
720 list_move_tail(&dev->power.entry, &dpm_late_early_list); in dpm_noirq_resume_devices()
724 device_resume_noirq(dev); in dpm_noirq_resume_devices()
726 put_device(dev); in dpm_noirq_resume_devices()
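The stage loops all share this shape: dpm_list_mtx cannot be held while calling into a device, so each iteration pins the device with get_device(), moves its entry onto the next stage's list, drops the mutex around the callback, and reacquires it afterwards. A sketch of the loop with the elided locking filled in (the exact lock/refcount ordering has shifted between kernel versions):

    mutex_lock(&dpm_list_mtx);

    while (!list_empty(&dpm_noirq_list)) {
            dev = to_device(dpm_noirq_list.next);

            get_device(dev);        /* keep @dev alive across the unlock */
            list_move_tail(&dev->power.entry, &dpm_late_early_list);

            mutex_unlock(&dpm_list_mtx);

            device_resume_noirq(dev);       /* may go async internally */

            put_device(dev);

            mutex_lock(&dpm_list_mtx);
    }

    mutex_unlock(&dpm_list_mtx);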
761 static void __device_resume_early(struct device *dev, pm_message_t state, bool async) in __device_resume_early() argument
767 TRACE_DEVICE(dev); in __device_resume_early()
770 if (dev->power.syscore || dev->power.direct_complete) in __device_resume_early()
773 if (!dev->power.is_late_suspended) in __device_resume_early()
776 if (!dpm_wait_for_superior(dev, async)) in __device_resume_early()
779 if (dev->pm_domain) { in __device_resume_early()
781 callback = pm_late_early_op(&dev->pm_domain->ops, state); in __device_resume_early()
782 } else if (dev->type && dev->type->pm) { in __device_resume_early()
784 callback = pm_late_early_op(dev->type->pm, state); in __device_resume_early()
785 } else if (dev->class && dev->class->pm) { in __device_resume_early()
787 callback = pm_late_early_op(dev->class->pm, state); in __device_resume_early()
788 } else if (dev->bus && dev->bus->pm) { in __device_resume_early()
790 callback = pm_late_early_op(dev->bus->pm, state); in __device_resume_early()
795 if (dev_pm_skip_resume(dev)) in __device_resume_early()
798 if (dev->driver && dev->driver->pm) { in __device_resume_early()
800 callback = pm_late_early_op(dev->driver->pm, state); in __device_resume_early()
804 error = dpm_run_callback(callback, dev, state, info); in __device_resume_early()
807 dev->power.is_late_suspended = false; in __device_resume_early()
812 pm_runtime_enable(dev); in __device_resume_early()
813 complete_all(&dev->power.completion); in __device_resume_early()
818 dpm_save_failed_dev(dev_name(dev)); in __device_resume_early()
819 pm_dev_err(dev, state, async ? " async early" : " early", error); in __device_resume_early()
825 struct device *dev = data; in async_resume_early() local
827 __device_resume_early(dev, pm_transition, true); in async_resume_early()
828 put_device(dev); in async_resume_early()
831 static void device_resume_early(struct device *dev) in device_resume_early() argument
833 if (dpm_async_fn(dev, async_resume_early)) in device_resume_early()
836 __device_resume_early(dev, pm_transition, false); in device_resume_early()
845 struct device *dev; in dpm_resume_early() local
853 dev = to_device(dpm_late_early_list.next); in dpm_resume_early()
854 get_device(dev); in dpm_resume_early()
855 list_move_tail(&dev->power.entry, &dpm_suspended_list); in dpm_resume_early()
859 device_resume_early(dev); in dpm_resume_early()
861 put_device(dev); in dpm_resume_early()
888 static void __device_resume(struct device *dev, pm_message_t state, bool async) in __device_resume() argument
895 TRACE_DEVICE(dev); in __device_resume()
898 if (dev->power.syscore) in __device_resume()
901 if (dev->power.direct_complete) { in __device_resume()
903 pm_runtime_enable(dev); in __device_resume()
907 if (!dpm_wait_for_superior(dev, async)) in __device_resume()
910 dpm_watchdog_set(&wd, dev); in __device_resume()
911 device_lock(dev); in __device_resume()
917 dev->power.is_prepared = false; in __device_resume()
919 if (!dev->power.is_suspended) in __device_resume()
922 if (dev->pm_domain) { in __device_resume()
924 callback = pm_op(&dev->pm_domain->ops, state); in __device_resume()
928 if (dev->type && dev->type->pm) { in __device_resume()
930 callback = pm_op(dev->type->pm, state); in __device_resume()
934 if (dev->class && dev->class->pm) { in __device_resume()
936 callback = pm_op(dev->class->pm, state); in __device_resume()
940 if (dev->bus) { in __device_resume()
941 if (dev->bus->pm) { in __device_resume()
943 callback = pm_op(dev->bus->pm, state); in __device_resume()
944 } else if (dev->bus->resume) { in __device_resume()
946 callback = dev->bus->resume; in __device_resume()
952 if (!callback && dev->driver && dev->driver->pm) { in __device_resume()
954 callback = pm_op(dev->driver->pm, state); in __device_resume()
958 error = dpm_run_callback(callback, dev, state, info); in __device_resume()
959 dev->power.is_suspended = false; in __device_resume()
962 device_unlock(dev); in __device_resume()
966 complete_all(&dev->power.completion); in __device_resume()
973 dpm_save_failed_dev(dev_name(dev)); in __device_resume()
974 pm_dev_err(dev, state, async ? " async" : "", error); in __device_resume()
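The direct_complete branch near the top is the payoff of the optimization set up in device_prepare(): a device whose suspend was skipped entirely just has runtime PM re-enabled and bypasses every resume callback. The excerpt with its elided comment, as in mainline:

    if (dev->power.direct_complete) {
            /* Match the pm_runtime_disable() in __device_suspend(). */
            pm_runtime_enable(dev);
            goto Complete;
    }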
980 struct device *dev = data; in async_resume() local
982 __device_resume(dev, pm_transition, true); in async_resume()
983 put_device(dev); in async_resume()
986 static void device_resume(struct device *dev) in device_resume() argument
988 if (dpm_async_fn(dev, async_resume)) in device_resume()
991 __device_resume(dev, pm_transition, false); in device_resume()
1003 struct device *dev; in dpm_resume() local
1014 dev = to_device(dpm_suspended_list.next); in dpm_resume()
1016 get_device(dev); in dpm_resume()
1020 device_resume(dev); in dpm_resume()
1024 if (!list_empty(&dev->power.entry)) in dpm_resume()
1025 list_move_tail(&dev->power.entry, &dpm_prepared_list); in dpm_resume()
1029 put_device(dev); in dpm_resume()
1047 static void device_complete(struct device *dev, pm_message_t state) in device_complete() argument
1052 if (dev->power.syscore) in device_complete()
1055 device_lock(dev); in device_complete()
1057 if (dev->pm_domain) { in device_complete()
1059 callback = dev->pm_domain->ops.complete; in device_complete()
1060 } else if (dev->type && dev->type->pm) { in device_complete()
1062 callback = dev->type->pm->complete; in device_complete()
1063 } else if (dev->class && dev->class->pm) { in device_complete()
1065 callback = dev->class->pm->complete; in device_complete()
1066 } else if (dev->bus && dev->bus->pm) { in device_complete()
1068 callback = dev->bus->pm->complete; in device_complete()
1071 if (!callback && dev->driver && dev->driver->pm) { in device_complete()
1073 callback = dev->driver->pm->complete; in device_complete()
1077 pm_dev_dbg(dev, state, info); in device_complete()
1078 callback(dev); in device_complete()
1081 device_unlock(dev); in device_complete()
1084 pm_runtime_put(dev); in device_complete()
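The pm_runtime_put() at the end of device_complete() balances the pm_runtime_get_noresume() taken at the start of device_prepare(), so every device's runtime PM usage count stays elevated for the whole system transition and runtime suspend cannot race with it:

    /* Top of device_prepare(): bump the usage count without waking
     * the device, blocking runtime suspend for the transition. */
    pm_runtime_get_noresume(dev);

    /* Bottom of device_complete(): drop that reference; an idle
     * check may now put the device back to sleep. */
    pm_runtime_put(dev);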
1104 struct device *dev = to_device(dpm_prepared_list.prev); in dpm_complete() local
1106 get_device(dev); in dpm_complete()
1107 dev->power.is_prepared = false; in dpm_complete()
1108 list_move(&dev->power.entry, &list); in dpm_complete()
1112 trace_device_pm_callback_start(dev, "", state.event); in dpm_complete()
1113 device_complete(dev, state); in dpm_complete()
1114 trace_device_pm_callback_end(dev, 0); in dpm_complete()
1116 put_device(dev); in dpm_complete()
1166 static void dpm_superior_set_must_resume(struct device *dev) in dpm_superior_set_must_resume() argument
1171 if (dev->parent) in dpm_superior_set_must_resume()
1172 dev->parent->power.must_resume = true; in dpm_superior_set_must_resume()
1176 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) in dpm_superior_set_must_resume()
1191 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async) in __device_suspend_noirq() argument
1197 TRACE_DEVICE(dev); in __device_suspend_noirq()
1200 dpm_wait_for_subordinate(dev, async); in __device_suspend_noirq()
1205 if (dev->power.syscore || dev->power.direct_complete) in __device_suspend_noirq()
1208 if (dev->pm_domain) { in __device_suspend_noirq()
1210 callback = pm_noirq_op(&dev->pm_domain->ops, state); in __device_suspend_noirq()
1211 } else if (dev->type && dev->type->pm) { in __device_suspend_noirq()
1213 callback = pm_noirq_op(dev->type->pm, state); in __device_suspend_noirq()
1214 } else if (dev->class && dev->class->pm) { in __device_suspend_noirq()
1216 callback = pm_noirq_op(dev->class->pm, state); in __device_suspend_noirq()
1217 } else if (dev->bus && dev->bus->pm) { in __device_suspend_noirq()
1219 callback = pm_noirq_op(dev->bus->pm, state); in __device_suspend_noirq()
1224 if (dev_pm_skip_suspend(dev)) in __device_suspend_noirq()
1227 if (dev->driver && dev->driver->pm) { in __device_suspend_noirq()
1229 callback = pm_noirq_op(dev->driver->pm, state); in __device_suspend_noirq()
1233 error = dpm_run_callback(callback, dev, state, info); in __device_suspend_noirq()
1237 dev_name(dev), pm_verb(state.event), error); in __device_suspend_noirq()
1242 dev->power.is_noirq_suspended = true; in __device_suspend_noirq()
1250 if (atomic_read(&dev->power.usage_count) > 1 || in __device_suspend_noirq()
1251 !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) && in __device_suspend_noirq()
1252 dev->power.may_skip_resume)) in __device_suspend_noirq()
1253 dev->power.must_resume = true; in __device_suspend_noirq()
1255 if (dev->power.must_resume) in __device_suspend_noirq()
1256 dpm_superior_set_must_resume(dev); in __device_suspend_noirq()
1259 complete_all(&dev->power.completion); in __device_suspend_noirq()
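The check above decides whether the resume path may be skipped later: must_resume is forced whenever someone besides the PM core holds a usage reference, or when either the driver (DPM_FLAG_MAY_SKIP_RESUME) or the core (power.may_skip_resume) has not opted in. A hypothetical userspace model of just that predicate:

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of the must_resume decision in __device_suspend_noirq(). */
    static bool must_resume(int usage_count, bool driver_opted_in,
                            bool core_may_skip)
    {
            return usage_count > 1 || !(driver_opted_in && core_may_skip);
    }

    int main(void)
    {
            printf("%d\n", must_resume(1, true, true));   /* 0: skippable */
            printf("%d\n", must_resume(2, true, true));   /* 1: extra ref held */
            printf("%d\n", must_resume(1, false, true));  /* 1: no driver opt-in */
            return 0;
    }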
1266 struct device *dev = data; in async_suspend_noirq() local
1269 error = __device_suspend_noirq(dev, pm_transition, true); in async_suspend_noirq()
1271 dpm_save_failed_dev(dev_name(dev)); in async_suspend_noirq()
1272 pm_dev_err(dev, pm_transition, " async", error); in async_suspend_noirq()
1275 put_device(dev); in async_suspend_noirq()
1278 static int device_suspend_noirq(struct device *dev) in device_suspend_noirq() argument
1280 if (dpm_async_fn(dev, async_suspend_noirq)) in device_suspend_noirq()
1283 return __device_suspend_noirq(dev, pm_transition, false); in device_suspend_noirq()
1297 struct device *dev = to_device(dpm_late_early_list.prev); in dpm_noirq_suspend_devices() local
1299 get_device(dev); in dpm_noirq_suspend_devices()
1302 error = device_suspend_noirq(dev); in dpm_noirq_suspend_devices()
1307 pm_dev_err(dev, state, " noirq", error); in dpm_noirq_suspend_devices()
1308 dpm_save_failed_dev(dev_name(dev)); in dpm_noirq_suspend_devices()
1309 } else if (!list_empty(&dev->power.entry)) { in dpm_noirq_suspend_devices()
1310 list_move(&dev->power.entry, &dpm_noirq_list); in dpm_noirq_suspend_devices()
1315 put_device(dev); in dpm_noirq_suspend_devices()
1359 static void dpm_propagate_wakeup_to_parent(struct device *dev) in dpm_propagate_wakeup_to_parent() argument
1361 struct device *parent = dev->parent; in dpm_propagate_wakeup_to_parent()
1368 if (device_wakeup_path(dev) && !parent->power.ignore_children) in dpm_propagate_wakeup_to_parent()
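A device on the wakeup path marks its parent as being on that path too, unless the parent ignores its children; the update is serialized by the parent's power.lock. The full helper, reconstructed around the listed lines from memory of mainline:

    static void dpm_propagate_wakeup_to_parent(struct device *dev)
    {
            struct device *parent = dev->parent;

            if (!parent)
                    return;

            spin_lock_irq(&parent->power.lock);

            if (device_wakeup_path(dev) && !parent->power.ignore_children)
                    parent->power.wakeup_path = true;

            spin_unlock_irq(&parent->power.lock);
    }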
1382 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async) in __device_suspend_late() argument
1388 TRACE_DEVICE(dev); in __device_suspend_late()
1391 __pm_runtime_disable(dev, false); in __device_suspend_late()
1393 dpm_wait_for_subordinate(dev, async); in __device_suspend_late()
1403 if (dev->power.syscore || dev->power.direct_complete) in __device_suspend_late()
1406 if (dev->pm_domain) { in __device_suspend_late()
1408 callback = pm_late_early_op(&dev->pm_domain->ops, state); in __device_suspend_late()
1409 } else if (dev->type && dev->type->pm) { in __device_suspend_late()
1411 callback = pm_late_early_op(dev->type->pm, state); in __device_suspend_late()
1412 } else if (dev->class && dev->class->pm) { in __device_suspend_late()
1414 callback = pm_late_early_op(dev->class->pm, state); in __device_suspend_late()
1415 } else if (dev->bus && dev->bus->pm) { in __device_suspend_late()
1417 callback = pm_late_early_op(dev->bus->pm, state); in __device_suspend_late()
1422 if (dev_pm_skip_suspend(dev)) in __device_suspend_late()
1425 if (dev->driver && dev->driver->pm) { in __device_suspend_late()
1427 callback = pm_late_early_op(dev->driver->pm, state); in __device_suspend_late()
1431 error = dpm_run_callback(callback, dev, state, info); in __device_suspend_late()
1435 dev_name(dev), pm_verb(state.event), error); in __device_suspend_late()
1438 dpm_propagate_wakeup_to_parent(dev); in __device_suspend_late()
1441 dev->power.is_late_suspended = true; in __device_suspend_late()
1445 complete_all(&dev->power.completion); in __device_suspend_late()
1451 struct device *dev = data; in async_suspend_late() local
1454 error = __device_suspend_late(dev, pm_transition, true); in async_suspend_late()
1456 dpm_save_failed_dev(dev_name(dev)); in async_suspend_late()
1457 pm_dev_err(dev, pm_transition, " async", error); in async_suspend_late()
1459 put_device(dev); in async_suspend_late()
1462 static int device_suspend_late(struct device *dev) in device_suspend_late() argument
1464 if (dpm_async_fn(dev, async_suspend_late)) in device_suspend_late()
1467 return __device_suspend_late(dev, pm_transition, false); in device_suspend_late()
1485 struct device *dev = to_device(dpm_suspended_list.prev); in dpm_suspend_late() local
1487 get_device(dev); in dpm_suspend_late()
1491 error = device_suspend_late(dev); in dpm_suspend_late()
1495 if (!list_empty(&dev->power.entry)) in dpm_suspend_late()
1496 list_move(&dev->power.entry, &dpm_late_early_list); in dpm_suspend_late()
1499 pm_dev_err(dev, state, " late", error); in dpm_suspend_late()
1500 dpm_save_failed_dev(dev_name(dev)); in dpm_suspend_late()
1505 put_device(dev); in dpm_suspend_late()
1556 static int legacy_suspend(struct device *dev, pm_message_t state, in legacy_suspend() argument
1557 int (*cb)(struct device *dev, pm_message_t state), in legacy_suspend() argument
1563 calltime = initcall_debug_start(dev, cb); in legacy_suspend()
1565 trace_device_pm_callback_start(dev, info, state.event); in legacy_suspend()
1566 error = cb(dev, state); in legacy_suspend()
1567 trace_device_pm_callback_end(dev, error); in legacy_suspend()
1570 initcall_debug_report(dev, calltime, cb, error); in legacy_suspend()
1575 static void dpm_clear_superiors_direct_complete(struct device *dev) in dpm_clear_superiors_direct_complete() argument
1580 if (dev->parent) { in dpm_clear_superiors_direct_complete()
1581 spin_lock_irq(&dev->parent->power.lock); in dpm_clear_superiors_direct_complete()
1582 dev->parent->power.direct_complete = false; in dpm_clear_superiors_direct_complete()
1583 spin_unlock_irq(&dev->parent->power.lock); in dpm_clear_superiors_direct_complete()
1588 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) { in dpm_clear_superiors_direct_complete()
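Once a device actually runs suspend callbacks, its parent and its link suppliers can no longer take the direct_complete shortcut, because they may have real work to do on resume. Each flag is cleared under its owner's power.lock, and the supplier walk holds the device-links read lock. A reconstruction from memory of mainline (details may vary by version):

    static void dpm_clear_superiors_direct_complete(struct device *dev)
    {
            struct device_link *link;
            int idx;

            if (dev->parent) {
                    spin_lock_irq(&dev->parent->power.lock);
                    dev->parent->power.direct_complete = false;
                    spin_unlock_irq(&dev->parent->power.lock);
            }

            idx = device_links_read_lock();

            list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
                    spin_lock_irq(&link->supplier->power.lock);
                    link->supplier->power.direct_complete = false;
                    spin_unlock_irq(&link->supplier->power.lock);
            }

            device_links_read_unlock(idx);
    }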
1603 static int __device_suspend(struct device *dev, pm_message_t state, bool async) in __device_suspend() argument
1610 TRACE_DEVICE(dev); in __device_suspend()
1613 dpm_wait_for_subordinate(dev, async); in __device_suspend()
1616 dev->power.direct_complete = false; in __device_suspend()
1631 pm_runtime_barrier(dev); in __device_suspend()
1634 dev->power.direct_complete = false; in __device_suspend()
1639 if (dev->power.syscore) in __device_suspend()
1643 if (device_may_wakeup(dev) || device_wakeup_path(dev)) in __device_suspend()
1644 dev->power.direct_complete = false; in __device_suspend()
1646 if (dev->power.direct_complete) { in __device_suspend()
1647 if (pm_runtime_status_suspended(dev)) { in __device_suspend()
1648 pm_runtime_disable(dev); in __device_suspend()
1649 if (pm_runtime_status_suspended(dev)) { in __device_suspend()
1650 pm_dev_dbg(dev, state, "direct-complete "); in __device_suspend()
1654 pm_runtime_enable(dev); in __device_suspend()
1656 dev->power.direct_complete = false; in __device_suspend()
1659 dev->power.may_skip_resume = true; in __device_suspend()
1660 dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME); in __device_suspend()
1662 dpm_watchdog_set(&wd, dev); in __device_suspend()
1663 device_lock(dev); in __device_suspend()
1665 if (dev->pm_domain) { in __device_suspend()
1667 callback = pm_op(&dev->pm_domain->ops, state); in __device_suspend()
1671 if (dev->type && dev->type->pm) { in __device_suspend()
1673 callback = pm_op(dev->type->pm, state); in __device_suspend()
1677 if (dev->class && dev->class->pm) { in __device_suspend()
1679 callback = pm_op(dev->class->pm, state); in __device_suspend()
1683 if (dev->bus) { in __device_suspend()
1684 if (dev->bus->pm) { in __device_suspend()
1686 callback = pm_op(dev->bus->pm, state); in __device_suspend()
1687 } else if (dev->bus->suspend) { in __device_suspend()
1688 pm_dev_dbg(dev, state, "legacy bus "); in __device_suspend()
1689 error = legacy_suspend(dev, state, dev->bus->suspend, in __device_suspend()
1696 if (!callback && dev->driver && dev->driver->pm) { in __device_suspend()
1698 callback = pm_op(dev->driver->pm, state); in __device_suspend()
1701 error = dpm_run_callback(callback, dev, state, info); in __device_suspend()
1705 dev->power.is_suspended = true; in __device_suspend()
1706 if (device_may_wakeup(dev)) in __device_suspend()
1707 dev->power.wakeup_path = true; in __device_suspend()
1709 dpm_propagate_wakeup_to_parent(dev); in __device_suspend()
1710 dpm_clear_superiors_direct_complete(dev); in __device_suspend()
1713 dev_name(dev), pm_verb(state.event), error); in __device_suspend()
1716 device_unlock(dev); in __device_suspend()
1723 complete_all(&dev->power.completion); in __device_suspend()
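The direct_complete block in the middle (source lines 1646-1656) uses a disable-then-recheck dance: the runtime PM status is checked once, runtime PM is disabled, and the status is checked again, because a runtime resume could have woken the device between the first check and the disable taking effect. A sketch of that block with the elided lines filled in from memory (version-dependent):

    if (dev->power.direct_complete) {
            if (pm_runtime_status_suspended(dev)) {
                    pm_runtime_disable(dev);
                    /* Recheck: a concurrent runtime resume may have
                     * run before the disable took effect. */
                    if (pm_runtime_status_suspended(dev)) {
                            pm_dev_dbg(dev, state, "direct-complete ");
                            dev->power.is_suspended = true;
                            goto Complete;
                    }

                    pm_runtime_enable(dev);
            }
            dev->power.direct_complete = false;
    }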
1730 struct device *dev = data; in async_suspend() local
1733 error = __device_suspend(dev, pm_transition, true); in async_suspend()
1735 dpm_save_failed_dev(dev_name(dev)); in async_suspend()
1736 pm_dev_err(dev, pm_transition, " async", error); in async_suspend()
1739 put_device(dev); in async_suspend()
1742 static int device_suspend(struct device *dev) in device_suspend() argument
1744 if (dpm_async_fn(dev, async_suspend)) in device_suspend()
1747 return __device_suspend(dev, pm_transition, false); in device_suspend()
1769 struct device *dev = to_device(dpm_prepared_list.prev); in dpm_suspend() local
1771 get_device(dev); in dpm_suspend()
1775 error = device_suspend(dev); in dpm_suspend()
1780 pm_dev_err(dev, state, "", error); in dpm_suspend()
1781 dpm_save_failed_dev(dev_name(dev)); in dpm_suspend()
1782 } else if (!list_empty(&dev->power.entry)) { in dpm_suspend()
1783 list_move(&dev->power.entry, &dpm_suspended_list); in dpm_suspend()
1788 put_device(dev); in dpm_suspend()
1816 static int device_prepare(struct device *dev, pm_message_t state) in device_prepare() argument
1827 pm_runtime_get_noresume(dev); in device_prepare()
1829 if (dev->power.syscore) in device_prepare()
1832 device_lock(dev); in device_prepare()
1834 dev->power.wakeup_path = false; in device_prepare()
1836 if (dev->power.no_pm_callbacks) in device_prepare()
1839 if (dev->pm_domain) in device_prepare()
1840 callback = dev->pm_domain->ops.prepare; in device_prepare()
1841 else if (dev->type && dev->type->pm) in device_prepare()
1842 callback = dev->type->pm->prepare; in device_prepare()
1843 else if (dev->class && dev->class->pm) in device_prepare()
1844 callback = dev->class->pm->prepare; in device_prepare()
1845 else if (dev->bus && dev->bus->pm) in device_prepare()
1846 callback = dev->bus->pm->prepare; in device_prepare()
1848 if (!callback && dev->driver && dev->driver->pm) in device_prepare()
1849 callback = dev->driver->pm->prepare; in device_prepare()
1852 ret = callback(dev); in device_prepare()
1855 device_unlock(dev); in device_prepare()
1859 pm_runtime_put(dev); in device_prepare()
1869 spin_lock_irq(&dev->power.lock); in device_prepare()
1870 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND && in device_prepare()
1871 (ret > 0 || dev->power.no_pm_callbacks) && in device_prepare()
1872 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE); in device_prepare()
1873 spin_unlock_irq(&dev->power.lock); in device_prepare()
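The assignment above is the whole direct_complete policy in one expression: a ->prepare() callback that returns a positive value opts the device in, devices with no PM callbacks at all opt in automatically, DPM_FLAG_NO_DIRECT_COMPLETE vetoes, and only plain suspend (not hibernation) qualifies. A hypothetical userspace model of the predicate:

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of the direct_complete decision at the end of device_prepare(). */
    static bool direct_complete(bool is_suspend, int prepare_ret,
                                bool no_pm_callbacks, bool no_dc_flag)
    {
            return is_suspend && (prepare_ret > 0 || no_pm_callbacks) &&
                   !no_dc_flag;
    }

    int main(void)
    {
            printf("%d\n", direct_complete(true, 1, false, false));  /* 1: prepare opted in */
            printf("%d\n", direct_complete(true, 0, true, false));   /* 1: no callbacks */
            printf("%d\n", direct_complete(false, 1, false, false)); /* 0: hibernation */
            printf("%d\n", direct_complete(true, 1, false, true));   /* 0: driver veto */
            return 0;
    }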
1906 struct device *dev = to_device(dpm_list.next); in dpm_prepare() local
1908 get_device(dev); in dpm_prepare()
1912 trace_device_pm_callback_start(dev, "", state.event); in dpm_prepare()
1913 error = device_prepare(dev, state); in dpm_prepare()
1914 trace_device_pm_callback_end(dev, error); in dpm_prepare()
1919 dev->power.is_prepared = true; in dpm_prepare()
1920 if (!list_empty(&dev->power.entry)) in dpm_prepare()
1921 list_move_tail(&dev->power.entry, &dpm_prepared_list); in dpm_prepare()
1925 dev_info(dev, "not prepared for power transition: code %d\n", in dpm_prepare()
1928 dev_name(dev), error); in dpm_prepare()
1929 dpm_save_failed_dev(dev_name(dev)); in dpm_prepare()
1934 put_device(dev); in dpm_prepare()
1978 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) in device_pm_wait_for_dev() argument
1980 dpm_wait(dev, subordinate->power.async_suspend); in device_pm_wait_for_dev()
1995 struct device *dev; in dpm_for_each_dev() local
2001 list_for_each_entry(dev, &dpm_list, power.entry) in dpm_for_each_dev()
2002 fn(dev, data); in dpm_for_each_dev()
2022 void device_pm_check_callbacks(struct device *dev) in device_pm_check_callbacks() argument
2026 spin_lock_irqsave(&dev->power.lock, flags); in device_pm_check_callbacks()
2027 dev->power.no_pm_callbacks = in device_pm_check_callbacks()
2028 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && in device_pm_check_callbacks()
2029 !dev->bus->suspend && !dev->bus->resume)) && in device_pm_check_callbacks()
2030 (!dev->class || pm_ops_is_empty(dev->class->pm)) && in device_pm_check_callbacks()
2031 (!dev->type || pm_ops_is_empty(dev->type->pm)) && in device_pm_check_callbacks()
2032 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && in device_pm_check_callbacks()
2033 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && in device_pm_check_callbacks()
2034 !dev->driver->suspend && !dev->driver->resume)); in device_pm_check_callbacks()
2035 spin_unlock_irqrestore(&dev->power.lock, flags); in device_pm_check_callbacks()
2038 bool dev_pm_skip_suspend(struct device *dev) in dev_pm_skip_suspend() argument
2040 return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) && in dev_pm_skip_suspend()
2041 pm_runtime_status_suspended(dev); in dev_pm_skip_suspend()
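dev_pm_skip_suspend() returns true only for devices that set DPM_FLAG_SMART_SUSPEND and are already runtime-suspended, letting the system transition leave them alone. Drivers typically opt in at probe time; a hypothetical example (driver name and probe signature invented; dev_pm_set_driver_flags() is the real API for setting these flags):

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    /* Hypothetical probe: opt into the optimizations checked by
     * dev_pm_skip_suspend() and dev_pm_skip_resume(). */
    static int foo_probe(struct device *dev)
    {
            dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
                                         DPM_FLAG_MAY_SKIP_RESUME);
            pm_runtime_enable(dev);
            return 0;
    }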