1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * drivers/base/power/main.c - Where the driver meets power management.
4 *
5 * Copyright (c) 2003 Patrick Mochel
6 * Copyright (c) 2003 Open Source Development Lab
7 *
8 * The driver model core calls device_pm_add() when a device is registered.
9 * This will initialize the embedded device_pm_info object in the device
10 * and add it to the list of power-controlled devices. sysfs entries for
11 * controlling device power management will also be added.
12 *
13 * A separate list is used for keeping track of power info, because the power
14 * domain dependencies may differ from the ancestral dependencies that the
15 * subsystem list maintains.
16 */
17
18 #define pr_fmt(fmt) "PM: " fmt
19
20 #include <linux/device.h>
21 #include <linux/export.h>
22 #include <linux/mutex.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm-trace.h>
26 #include <linux/pm_wakeirq.h>
27 #include <linux/interrupt.h>
28 #include <linux/sched.h>
29 #include <linux/sched/debug.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37 #include <linux/wakeup_reason.h>
38
39 #include "../base.h"
40 #include "power.h"
41
42 typedef int (*pm_callback_t)(struct device *);
43
44 /*
45 * The entries in the dpm_list list are in a depth first order, simply
46 * because children are guaranteed to be discovered after parents, and
47 * are inserted at the back of the list on discovery.
48 *
49 * Since device_pm_add() may be called with a device lock held,
50 * we must never try to acquire a device lock while holding
51 * dpm_list_mutex.
52 */
53
54 LIST_HEAD(dpm_list);
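/*
 * During a system transition, devices are moved from dpm_list to the lists
 * below as they pass through the prepare, suspend, suspend_late and
 * suspend_noirq phases, and back again in reverse order during resume.
 */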
55 static LIST_HEAD(dpm_prepared_list);
56 static LIST_HEAD(dpm_suspended_list);
57 static LIST_HEAD(dpm_late_early_list);
58 static LIST_HEAD(dpm_noirq_list);
59
60 struct suspend_stats suspend_stats;
61 static DEFINE_MUTEX(dpm_list_mtx);
62 static pm_message_t pm_transition;
63
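/* Error reported by a device callback during the current suspend or resume phase. */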
64 static int async_error;
65
static const char *pm_verb(int event)
67 {
68 switch (event) {
69 case PM_EVENT_SUSPEND:
70 return "suspend";
71 case PM_EVENT_RESUME:
72 return "resume";
73 case PM_EVENT_FREEZE:
74 return "freeze";
75 case PM_EVENT_QUIESCE:
76 return "quiesce";
77 case PM_EVENT_HIBERNATE:
78 return "hibernate";
79 case PM_EVENT_THAW:
80 return "thaw";
81 case PM_EVENT_RESTORE:
82 return "restore";
83 case PM_EVENT_RECOVER:
84 return "recover";
85 default:
86 return "(unknown PM event)";
87 }
88 }
89
90 /**
91 * device_pm_sleep_init - Initialize system suspend-related device fields.
92 * @dev: Device object being initialized.
93 */
void device_pm_sleep_init(struct device *dev)
95 {
96 dev->power.is_prepared = false;
97 dev->power.is_suspended = false;
98 dev->power.is_noirq_suspended = false;
99 dev->power.is_late_suspended = false;
100 init_completion(&dev->power.completion);
101 complete_all(&dev->power.completion);
102 dev->power.wakeup = NULL;
103 INIT_LIST_HEAD(&dev->power.entry);
104 }
105
106 /**
107 * device_pm_lock - Lock the list of active devices used by the PM core.
108 */
void device_pm_lock(void)
110 {
111 mutex_lock(&dpm_list_mtx);
112 }
113
114 /**
115 * device_pm_unlock - Unlock the list of active devices used by the PM core.
116 */
void device_pm_unlock(void)
118 {
119 mutex_unlock(&dpm_list_mtx);
120 }
121
122 /**
123 * device_pm_add - Add a device to the PM core's list of active devices.
124 * @dev: Device to add to the list.
125 */
void device_pm_add(struct device *dev)
127 {
128 /* Skip PM setup/initialization. */
129 if (device_pm_not_required(dev))
130 return;
131
132 pr_debug("Adding info for %s:%s\n",
133 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
134 device_pm_check_callbacks(dev);
135 mutex_lock(&dpm_list_mtx);
136 if (dev->parent && dev->parent->power.is_prepared)
137 dev_warn(dev, "parent %s should not be sleeping\n",
138 dev_name(dev->parent));
139 list_add_tail(&dev->power.entry, &dpm_list);
140 dev->power.in_dpm_list = true;
141 mutex_unlock(&dpm_list_mtx);
142 }
143
144 /**
145 * device_pm_remove - Remove a device from the PM core's list of active devices.
146 * @dev: Device to be removed from the list.
147 */
void device_pm_remove(struct device *dev)
149 {
150 if (device_pm_not_required(dev))
151 return;
152
153 pr_debug("Removing info for %s:%s\n",
154 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
155 complete_all(&dev->power.completion);
156 mutex_lock(&dpm_list_mtx);
157 list_del_init(&dev->power.entry);
158 dev->power.in_dpm_list = false;
159 mutex_unlock(&dpm_list_mtx);
160 device_wakeup_disable(dev);
161 pm_runtime_remove(dev);
162 device_pm_check_callbacks(dev);
163 }
164
165 /**
166 * device_pm_move_before - Move device in the PM core's list of active devices.
167 * @deva: Device to move in dpm_list.
168 * @devb: Device @deva should come before.
169 */
void device_pm_move_before(struct device *deva, struct device *devb)
171 {
172 pr_debug("Moving %s:%s before %s:%s\n",
173 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
174 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
175 /* Delete deva from dpm_list and reinsert before devb. */
176 list_move_tail(&deva->power.entry, &devb->power.entry);
177 }
178
179 /**
180 * device_pm_move_after - Move device in the PM core's list of active devices.
181 * @deva: Device to move in dpm_list.
182 * @devb: Device @deva should come after.
183 */
void device_pm_move_after(struct device *deva, struct device *devb)
185 {
186 pr_debug("Moving %s:%s after %s:%s\n",
187 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
188 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
189 /* Delete deva from dpm_list and reinsert after devb. */
190 list_move(&deva->power.entry, &devb->power.entry);
191 }
192
193 /**
194 * device_pm_move_last - Move device to end of the PM core's list of devices.
195 * @dev: Device to move in dpm_list.
196 */
void device_pm_move_last(struct device *dev)
198 {
199 pr_debug("Moving %s:%s to end of list\n",
200 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
201 list_move_tail(&dev->power.entry, &dpm_list);
202 }
203
static ktime_t initcall_debug_start(struct device *dev, void *cb)
205 {
206 if (!pm_print_times_enabled)
207 return 0;
208
209 dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
210 task_pid_nr(current),
211 dev->parent ? dev_name(dev->parent) : "none");
212 return ktime_get();
213 }
214
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
217 {
218 ktime_t rettime;
219 s64 nsecs;
220
221 if (!pm_print_times_enabled)
222 return;
223
224 rettime = ktime_get();
225 nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
226
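	/* The ">> 10" below approximates a ns-to-us conversion (divide by 1024). */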
227 dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
228 (unsigned long long)nsecs >> 10);
229 }
230
231 /**
232 * dpm_wait - Wait for a PM operation to complete.
233 * @dev: Device to wait for.
234 * @async: If unset, wait only if the device's power.async_suspend flag is set.
235 */
static void dpm_wait(struct device *dev, bool async)
237 {
238 if (!dev)
239 return;
240
241 if (async || (pm_async_enabled && dev->power.async_suspend))
242 wait_for_completion(&dev->power.completion);
243 }
244
static int dpm_wait_fn(struct device *dev, void *async_ptr)
246 {
247 dpm_wait(dev, *((bool *)async_ptr));
248 return 0;
249 }
250
static void dpm_wait_for_children(struct device *dev, bool async)
252 {
253 device_for_each_child(dev, &async, dpm_wait_fn);
254 }
255
static void dpm_wait_for_suppliers(struct device *dev, bool async)
257 {
258 struct device_link *link;
259 int idx;
260
261 idx = device_links_read_lock();
262
263 /*
264 * If the supplier goes away right after we've checked the link to it,
265 * we'll wait for its completion to change the state, but that's fine,
266 * because the only things that will block as a result are the SRCU
267 * callbacks freeing the link objects for the links in the list we're
268 * walking.
269 */
270 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
271 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
272 dpm_wait(link->supplier, async);
273
274 device_links_read_unlock(idx);
275 }
276
static void dpm_wait_for_superior(struct device *dev, bool async)
278 {
279 dpm_wait(dev->parent, async);
280 dpm_wait_for_suppliers(dev, async);
281 }
282
static void dpm_wait_for_consumers(struct device *dev, bool async)
284 {
285 struct device_link *link;
286 int idx;
287
288 idx = device_links_read_lock();
289
290 /*
291 * The status of a device link can only be changed from "dormant" by a
292 * probe, but that cannot happen during system suspend/resume. In
293 * theory it can change to "dormant" at that time, but then it is
294 * reasonable to wait for the target device anyway (eg. if it goes
295 * away, it's better to wait for it to go away completely and then
296 * continue instead of trying to continue in parallel with its
297 * unregistration).
298 */
299 list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
300 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
301 dpm_wait(link->consumer, async);
302
303 device_links_read_unlock(idx);
304 }
305
static void dpm_wait_for_subordinate(struct device *dev, bool async)
307 {
308 dpm_wait_for_children(dev, async);
309 dpm_wait_for_consumers(dev, async);
310 }
311
312 /**
313 * pm_op - Return the PM operation appropriate for given PM event.
314 * @ops: PM operations to choose from.
315 * @state: PM transition of the system being carried out.
316 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
318 {
319 switch (state.event) {
320 #ifdef CONFIG_SUSPEND
321 case PM_EVENT_SUSPEND:
322 return ops->suspend;
323 case PM_EVENT_RESUME:
324 return ops->resume;
325 #endif /* CONFIG_SUSPEND */
326 #ifdef CONFIG_HIBERNATE_CALLBACKS
327 case PM_EVENT_FREEZE:
328 case PM_EVENT_QUIESCE:
329 return ops->freeze;
330 case PM_EVENT_HIBERNATE:
331 return ops->poweroff;
332 case PM_EVENT_THAW:
333 case PM_EVENT_RECOVER:
334 return ops->thaw;
336 case PM_EVENT_RESTORE:
337 return ops->restore;
338 #endif /* CONFIG_HIBERNATE_CALLBACKS */
339 }
340
341 return NULL;
342 }
343
344 /**
345 * pm_late_early_op - Return the PM operation appropriate for given PM event.
346 * @ops: PM operations to choose from.
347 * @state: PM transition of the system being carried out.
348 *
349 * Runtime PM is disabled for @dev while this function is being executed.
350 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
353 {
354 switch (state.event) {
355 #ifdef CONFIG_SUSPEND
356 case PM_EVENT_SUSPEND:
357 return ops->suspend_late;
358 case PM_EVENT_RESUME:
359 return ops->resume_early;
360 #endif /* CONFIG_SUSPEND */
361 #ifdef CONFIG_HIBERNATE_CALLBACKS
362 case PM_EVENT_FREEZE:
363 case PM_EVENT_QUIESCE:
364 return ops->freeze_late;
365 case PM_EVENT_HIBERNATE:
366 return ops->poweroff_late;
367 case PM_EVENT_THAW:
368 case PM_EVENT_RECOVER:
369 return ops->thaw_early;
370 case PM_EVENT_RESTORE:
371 return ops->restore_early;
372 #endif /* CONFIG_HIBERNATE_CALLBACKS */
373 }
374
375 return NULL;
376 }
377
378 /**
379 * pm_noirq_op - Return the PM operation appropriate for given PM event.
380 * @ops: PM operations to choose from.
381 * @state: PM transition of the system being carried out.
382 *
383 * The driver of @dev will not receive interrupts while this function is being
384 * executed.
385 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
387 {
388 switch (state.event) {
389 #ifdef CONFIG_SUSPEND
390 case PM_EVENT_SUSPEND:
391 return ops->suspend_noirq;
392 case PM_EVENT_RESUME:
393 return ops->resume_noirq;
394 #endif /* CONFIG_SUSPEND */
395 #ifdef CONFIG_HIBERNATE_CALLBACKS
396 case PM_EVENT_FREEZE:
397 case PM_EVENT_QUIESCE:
398 return ops->freeze_noirq;
399 case PM_EVENT_HIBERNATE:
400 return ops->poweroff_noirq;
401 case PM_EVENT_THAW:
402 case PM_EVENT_RECOVER:
403 return ops->thaw_noirq;
404 case PM_EVENT_RESTORE:
405 return ops->restore_noirq;
406 #endif /* CONFIG_HIBERNATE_CALLBACKS */
407 }
408
409 return NULL;
410 }
411
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
413 {
414 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
415 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
416 ", may wakeup" : "");
417 }
418
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
421 {
422 pr_err("Device %s failed to %s%s: error %d\n",
423 dev_name(dev), pm_verb(state.event), info, error);
424 }
425
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
428 {
429 ktime_t calltime;
430 u64 usecs64;
431 int usecs;
432
433 calltime = ktime_get();
434 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
435 do_div(usecs64, NSEC_PER_USEC);
436 usecs = usecs64;
437 if (usecs == 0)
438 usecs = 1;
439
440 pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
441 info ?: "", info ? " " : "", pm_verb(state.event),
442 error ? "aborted" : "complete",
443 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
444 }
445
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
448 {
449 ktime_t calltime;
450 int error;
451
452 if (!cb)
453 return 0;
454
455 calltime = initcall_debug_start(dev, cb);
456
457 pm_dev_dbg(dev, state, info);
458 trace_device_pm_callback_start(dev, info, state.event);
459 error = cb(dev);
460 trace_device_pm_callback_end(dev, error);
461 suspend_report_result(cb, error);
462
463 initcall_debug_report(dev, calltime, cb, error);
464
465 return error;
466 }
467
468 #ifdef CONFIG_DPM_WATCHDOG
469 struct dpm_watchdog {
470 struct device *dev;
471 struct task_struct *tsk;
472 struct timer_list timer;
473 };
474
475 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
476 struct dpm_watchdog wd
477
478 /**
479 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
480 * @t: The timer that PM watchdog depends on.
481 *
482 * Called when a driver has timed out suspending or resuming.
483 * There's not much we can do here to recover so panic() to
484 * capture a crash-dump in pstore.
485 */
static void dpm_watchdog_handler(struct timer_list *t)
487 {
488 struct dpm_watchdog *wd = from_timer(wd, t, timer);
489
490 dev_emerg(wd->dev, "**** DPM device timeout ****\n");
491 show_stack(wd->tsk, NULL);
492 panic("%s %s: unrecoverable failure\n",
493 dev_driver_string(wd->dev), dev_name(wd->dev));
494 }
495
496 /**
497 * dpm_watchdog_set - Enable pm watchdog for given device.
498 * @wd: Watchdog. Must be allocated on the stack.
499 * @dev: Device to handle.
500 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
502 {
503 struct timer_list *timer = &wd->timer;
504
505 wd->dev = dev;
506 wd->tsk = current;
507
508 timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
509 /* use same timeout value for both suspend and resume */
510 timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
511 add_timer(timer);
512 }
513
514 /**
515 * dpm_watchdog_clear - Disable suspend/resume watchdog.
516 * @wd: Watchdog to disable.
517 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
519 {
520 struct timer_list *timer = &wd->timer;
521
522 del_timer_sync(timer);
523 destroy_timer_on_stack(timer);
524 }
525 #else
526 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
527 #define dpm_watchdog_set(x, y)
528 #define dpm_watchdog_clear(x)
529 #endif
530
531 /*------------------------- Resume routines -------------------------*/
532
533 /**
534 * suspend_event - Return a "suspend" message for given "resume" one.
535 * @resume_msg: PM message representing a system-wide resume transition.
536 */
static pm_message_t suspend_event(pm_message_t resume_msg)
538 {
539 switch (resume_msg.event) {
540 case PM_EVENT_RESUME:
541 return PMSG_SUSPEND;
542 case PM_EVENT_THAW:
543 case PM_EVENT_RESTORE:
544 return PMSG_FREEZE;
545 case PM_EVENT_RECOVER:
546 return PMSG_HIBERNATE;
547 }
548 return PMSG_ON;
549 }
550
551 /**
552 * dev_pm_may_skip_resume - System-wide device resume optimization check.
553 * @dev: Target device.
554 *
555 * Checks whether or not the device may be left in suspend after a system-wide
556 * transition to the working state.
557 */
bool dev_pm_may_skip_resume(struct device *dev)
559 {
560 return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
561 }
562
static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
566 {
567 pm_callback_t callback;
568 const char *info;
569
570 if (dev->pm_domain) {
571 info = "noirq power domain ";
572 callback = pm_noirq_op(&dev->pm_domain->ops, state);
573 } else if (dev->type && dev->type->pm) {
574 info = "noirq type ";
575 callback = pm_noirq_op(dev->type->pm, state);
576 } else if (dev->class && dev->class->pm) {
577 info = "noirq class ";
578 callback = pm_noirq_op(dev->class->pm, state);
579 } else if (dev->bus && dev->bus->pm) {
580 info = "noirq bus ";
581 callback = pm_noirq_op(dev->bus->pm, state);
582 } else {
583 return NULL;
584 }
585
586 if (info_p)
587 *info_p = info;
588
589 return callback;
590 }
591
592 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
593 pm_message_t state,
594 const char **info_p);
595
596 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
597 pm_message_t state,
598 const char **info_p);
599
600 /**
601 * device_resume_noirq - Execute a "noirq resume" callback for given device.
602 * @dev: Device to handle.
603 * @state: PM transition of the system being carried out.
604 * @async: If true, the device is being resumed asynchronously.
605 *
606 * The driver of @dev will not receive interrupts while this function is being
607 * executed.
608 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
610 {
611 pm_callback_t callback;
612 const char *info;
613 bool skip_resume;
614 int error = 0;
615
616 TRACE_DEVICE(dev);
617 TRACE_RESUME(0);
618
619 if (dev->power.syscore || dev->power.direct_complete)
620 goto Out;
621
622 if (!dev->power.is_noirq_suspended)
623 goto Out;
624
625 dpm_wait_for_superior(dev, async);
626
627 skip_resume = dev_pm_may_skip_resume(dev);
628
629 callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
630 if (callback)
631 goto Run;
632
633 if (skip_resume)
634 goto Skip;
635
636 if (dev_pm_smart_suspend_and_suspended(dev)) {
637 pm_message_t suspend_msg = suspend_event(state);
638
639 /*
640 * If "freeze" callbacks have been skipped during a transition
641 * related to hibernation, the subsequent "thaw" callbacks must
642 * be skipped too or bad things may happen. Otherwise, resume
643 * callbacks are going to be run for the device, so its runtime
644 * PM status must be changed to reflect the new state after the
645 * transition under way.
646 */
647 if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
648 !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
649 if (state.event == PM_EVENT_THAW) {
650 skip_resume = true;
651 goto Skip;
652 } else {
653 pm_runtime_set_active(dev);
654 }
655 }
656 }
657
658 if (dev->driver && dev->driver->pm) {
659 info = "noirq driver ";
660 callback = pm_noirq_op(dev->driver->pm, state);
661 }
662
663 Run:
664 error = dpm_run_callback(callback, dev, state, info);
665
666 Skip:
667 dev->power.is_noirq_suspended = false;
668
669 if (skip_resume) {
670 /* Make the next phases of resume skip the device. */
671 dev->power.is_late_suspended = false;
672 dev->power.is_suspended = false;
673 /*
674 * The device is going to be left in suspend, but it might not
675 * have been in runtime suspend before the system suspended, so
676 * its runtime PM status needs to be updated to avoid confusing
677 * the runtime PM framework when runtime PM is enabled for the
678 * device again.
679 */
680 pm_runtime_set_suspended(dev);
681 }
682
683 Out:
684 complete_all(&dev->power.completion);
685 TRACE_RESUME(error);
686 return error;
687 }
688
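/*
 * is_async - Check whether @dev is to be handled asynchronously.
 *
 * Asynchronous handling requires both the global pm_async switch and the
 * device's async_suspend flag, and is never used while PM tracing is enabled.
 */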
static bool is_async(struct device *dev)
690 {
691 return dev->power.async_suspend && pm_async_enabled
692 && !pm_trace_is_enabled();
693 }
694
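/*
 * dpm_async_fn - Schedule @func to run asynchronously for @dev if async PM is
 * in effect for the device.
 *
 * Returns true if the function was scheduled, or false if the caller should
 * invoke the callback synchronously.  The device's PM completion is
 * reinitialized in either case.
 */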
static bool dpm_async_fn(struct device *dev, async_func_t func)
696 {
697 reinit_completion(&dev->power.completion);
698
699 if (is_async(dev)) {
700 get_device(dev);
701 async_schedule(func, dev);
702 return true;
703 }
704
705 return false;
706 }
707
static void async_resume_noirq(void *data, async_cookie_t cookie)
709 {
710 struct device *dev = (struct device *)data;
711 int error;
712
713 error = device_resume_noirq(dev, pm_transition, true);
714 if (error)
715 pm_dev_err(dev, pm_transition, " async", error);
716
717 put_device(dev);
718 }
719
static void dpm_noirq_resume_devices(pm_message_t state)
721 {
722 struct device *dev;
723 ktime_t starttime = ktime_get();
724
725 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
726 mutex_lock(&dpm_list_mtx);
727 pm_transition = state;
728
	/*
	 * Trigger the asynchronous resume threads upfront so that they are not
	 * delayed by devices that are resumed synchronously.
	 */
734 list_for_each_entry(dev, &dpm_noirq_list, power.entry)
735 dpm_async_fn(dev, async_resume_noirq);
736
737 while (!list_empty(&dpm_noirq_list)) {
738 dev = to_device(dpm_noirq_list.next);
739 get_device(dev);
740 list_move_tail(&dev->power.entry, &dpm_late_early_list);
741 mutex_unlock(&dpm_list_mtx);
742
743 if (!is_async(dev)) {
744 int error;
745
746 error = device_resume_noirq(dev, state, false);
747 if (error) {
748 suspend_stats.failed_resume_noirq++;
749 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
750 dpm_save_failed_dev(dev_name(dev));
751 pm_dev_err(dev, state, " noirq", error);
752 }
753 }
754
755 mutex_lock(&dpm_list_mtx);
756 put_device(dev);
757 }
758 mutex_unlock(&dpm_list_mtx);
759 async_synchronize_full();
760 dpm_show_time(starttime, state, 0, "noirq");
761 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
762 }
763
764 /**
765 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
766 * @state: PM transition of the system being carried out.
767 *
768 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
769 * allow device drivers' interrupt handlers to be called.
770 */
void dpm_resume_noirq(pm_message_t state)
772 {
773 dpm_noirq_resume_devices(state);
774
775 resume_device_irqs();
776 device_wakeup_disarm_wake_irqs();
777
778 cpuidle_resume();
779 }
780
static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
784 {
785 pm_callback_t callback;
786 const char *info;
787
788 if (dev->pm_domain) {
789 info = "early power domain ";
790 callback = pm_late_early_op(&dev->pm_domain->ops, state);
791 } else if (dev->type && dev->type->pm) {
792 info = "early type ";
793 callback = pm_late_early_op(dev->type->pm, state);
794 } else if (dev->class && dev->class->pm) {
795 info = "early class ";
796 callback = pm_late_early_op(dev->class->pm, state);
797 } else if (dev->bus && dev->bus->pm) {
798 info = "early bus ";
799 callback = pm_late_early_op(dev->bus->pm, state);
800 } else {
801 return NULL;
802 }
803
804 if (info_p)
805 *info_p = info;
806
807 return callback;
808 }
809
810 /**
811 * device_resume_early - Execute an "early resume" callback for given device.
812 * @dev: Device to handle.
813 * @state: PM transition of the system being carried out.
814 * @async: If true, the device is being resumed asynchronously.
815 *
816 * Runtime PM is disabled for @dev while this function is being executed.
817 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
819 {
820 pm_callback_t callback;
821 const char *info;
822 int error = 0;
823
824 TRACE_DEVICE(dev);
825 TRACE_RESUME(0);
826
827 if (dev->power.syscore || dev->power.direct_complete)
828 goto Out;
829
830 if (!dev->power.is_late_suspended)
831 goto Out;
832
833 dpm_wait_for_superior(dev, async);
834
835 callback = dpm_subsys_resume_early_cb(dev, state, &info);
836
837 if (!callback && dev->driver && dev->driver->pm) {
838 info = "early driver ";
839 callback = pm_late_early_op(dev->driver->pm, state);
840 }
841
842 error = dpm_run_callback(callback, dev, state, info);
843 dev->power.is_late_suspended = false;
844
845 Out:
846 TRACE_RESUME(error);
847
848 pm_runtime_enable(dev);
849 complete_all(&dev->power.completion);
850 return error;
851 }
852
static void async_resume_early(void *data, async_cookie_t cookie)
854 {
855 struct device *dev = (struct device *)data;
856 int error;
857
858 error = device_resume_early(dev, pm_transition, true);
859 if (error)
860 pm_dev_err(dev, pm_transition, " async", error);
861
862 put_device(dev);
863 }
864
865 /**
866 * dpm_resume_early - Execute "early resume" callbacks for all devices.
867 * @state: PM transition of the system being carried out.
868 */
void dpm_resume_early(pm_message_t state)
870 {
871 struct device *dev;
872 ktime_t starttime = ktime_get();
873
874 trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
875 mutex_lock(&dpm_list_mtx);
876 pm_transition = state;
877
	/*
	 * Trigger the asynchronous resume threads upfront so that they are not
	 * delayed by devices that are resumed synchronously.
	 */
883 list_for_each_entry(dev, &dpm_late_early_list, power.entry)
884 dpm_async_fn(dev, async_resume_early);
885
886 while (!list_empty(&dpm_late_early_list)) {
887 dev = to_device(dpm_late_early_list.next);
888 get_device(dev);
889 list_move_tail(&dev->power.entry, &dpm_suspended_list);
890 mutex_unlock(&dpm_list_mtx);
891
892 if (!is_async(dev)) {
893 int error;
894
895 error = device_resume_early(dev, state, false);
896 if (error) {
897 suspend_stats.failed_resume_early++;
898 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
899 dpm_save_failed_dev(dev_name(dev));
900 pm_dev_err(dev, state, " early", error);
901 }
902 }
903 mutex_lock(&dpm_list_mtx);
904 put_device(dev);
905 }
906 mutex_unlock(&dpm_list_mtx);
907 async_synchronize_full();
908 dpm_show_time(starttime, state, 0, "early");
909 trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
910 }
911
912 /**
913 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
914 * @state: PM transition of the system being carried out.
915 */
void dpm_resume_start(pm_message_t state)
917 {
918 dpm_resume_noirq(state);
919 dpm_resume_early(state);
920 }
921 EXPORT_SYMBOL_GPL(dpm_resume_start);
922
923 /**
924 * device_resume - Execute "resume" callbacks for given device.
925 * @dev: Device to handle.
926 * @state: PM transition of the system being carried out.
927 * @async: If true, the device is being resumed asynchronously.
928 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
930 {
931 pm_callback_t callback = NULL;
932 const char *info = NULL;
933 int error = 0;
934 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
935
936 TRACE_DEVICE(dev);
937 TRACE_RESUME(0);
938
939 if (dev->power.syscore)
940 goto Complete;
941
942 if (dev->power.direct_complete) {
943 /* Match the pm_runtime_disable() in __device_suspend(). */
944 pm_runtime_enable(dev);
945 goto Complete;
946 }
947
948 dpm_wait_for_superior(dev, async);
949 dpm_watchdog_set(&wd, dev);
950 device_lock(dev);
951
952 /*
953 * This is a fib. But we'll allow new children to be added below
954 * a resumed device, even if the device hasn't been completed yet.
955 */
956 dev->power.is_prepared = false;
957
958 if (!dev->power.is_suspended)
959 goto Unlock;
960
961 if (dev->pm_domain) {
962 info = "power domain ";
963 callback = pm_op(&dev->pm_domain->ops, state);
964 goto Driver;
965 }
966
967 if (dev->type && dev->type->pm) {
968 info = "type ";
969 callback = pm_op(dev->type->pm, state);
970 goto Driver;
971 }
972
973 if (dev->class && dev->class->pm) {
974 info = "class ";
975 callback = pm_op(dev->class->pm, state);
976 goto Driver;
977 }
978
979 if (dev->bus) {
980 if (dev->bus->pm) {
981 info = "bus ";
982 callback = pm_op(dev->bus->pm, state);
983 } else if (dev->bus->resume) {
984 info = "legacy bus ";
985 callback = dev->bus->resume;
986 goto End;
987 }
988 }
989
990 Driver:
991 if (!callback && dev->driver && dev->driver->pm) {
992 info = "driver ";
993 callback = pm_op(dev->driver->pm, state);
994 }
995
996 End:
997 error = dpm_run_callback(callback, dev, state, info);
998 dev->power.is_suspended = false;
999
1000 Unlock:
1001 device_unlock(dev);
1002 dpm_watchdog_clear(&wd);
1003
1004 Complete:
1005 complete_all(&dev->power.completion);
1006
1007 TRACE_RESUME(error);
1008
1009 return error;
1010 }
1011
static void async_resume(void *data, async_cookie_t cookie)
1013 {
1014 struct device *dev = (struct device *)data;
1015 int error;
1016
1017 error = device_resume(dev, pm_transition, true);
1018 if (error)
1019 pm_dev_err(dev, pm_transition, " async", error);
1020 put_device(dev);
1021 }
1022
1023 /**
1024 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1025 * @state: PM transition of the system being carried out.
1026 *
1027 * Execute the appropriate "resume" callback for all devices whose status
1028 * indicates that they are suspended.
1029 */
void dpm_resume(pm_message_t state)
1031 {
1032 struct device *dev;
1033 ktime_t starttime = ktime_get();
1034
1035 trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1036 might_sleep();
1037
1038 mutex_lock(&dpm_list_mtx);
1039 pm_transition = state;
1040 async_error = 0;
1041
1042 list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1043 dpm_async_fn(dev, async_resume);
1044
1045 while (!list_empty(&dpm_suspended_list)) {
1046 dev = to_device(dpm_suspended_list.next);
1047 get_device(dev);
1048 if (!is_async(dev)) {
1049 int error;
1050
1051 mutex_unlock(&dpm_list_mtx);
1052
1053 error = device_resume(dev, state, false);
1054 if (error) {
1055 suspend_stats.failed_resume++;
1056 dpm_save_failed_step(SUSPEND_RESUME);
1057 dpm_save_failed_dev(dev_name(dev));
1058 pm_dev_err(dev, state, "", error);
1059 }
1060
1061 mutex_lock(&dpm_list_mtx);
1062 }
1063 if (!list_empty(&dev->power.entry))
1064 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1065 put_device(dev);
1066 }
1067 mutex_unlock(&dpm_list_mtx);
1068 async_synchronize_full();
1069 dpm_show_time(starttime, state, 0, NULL);
1070
1071 cpufreq_resume();
1072 devfreq_resume();
1073 trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1074 }
1075
1076 /**
1077 * device_complete - Complete a PM transition for given device.
1078 * @dev: Device to handle.
1079 * @state: PM transition of the system being carried out.
1080 */
static void device_complete(struct device *dev, pm_message_t state)
1082 {
1083 void (*callback)(struct device *) = NULL;
1084 const char *info = NULL;
1085
1086 if (dev->power.syscore)
1087 return;
1088
1089 device_lock(dev);
1090
1091 if (dev->pm_domain) {
1092 info = "completing power domain ";
1093 callback = dev->pm_domain->ops.complete;
1094 } else if (dev->type && dev->type->pm) {
1095 info = "completing type ";
1096 callback = dev->type->pm->complete;
1097 } else if (dev->class && dev->class->pm) {
1098 info = "completing class ";
1099 callback = dev->class->pm->complete;
1100 } else if (dev->bus && dev->bus->pm) {
1101 info = "completing bus ";
1102 callback = dev->bus->pm->complete;
1103 }
1104
1105 if (!callback && dev->driver && dev->driver->pm) {
1106 info = "completing driver ";
1107 callback = dev->driver->pm->complete;
1108 }
1109
1110 if (callback) {
1111 pm_dev_dbg(dev, state, info);
1112 callback(dev);
1113 }
1114
1115 device_unlock(dev);
1116
1117 pm_runtime_put(dev);
1118 }
1119
1120 /**
1121 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1122 * @state: PM transition of the system being carried out.
1123 *
1124 * Execute the ->complete() callbacks for all devices whose PM status is not
1125 * DPM_ON (this allows new devices to be registered).
1126 */
void dpm_complete(pm_message_t state)
1128 {
1129 struct list_head list;
1130
1131 trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1132 might_sleep();
1133
1134 INIT_LIST_HEAD(&list);
1135 mutex_lock(&dpm_list_mtx);
1136 while (!list_empty(&dpm_prepared_list)) {
1137 struct device *dev = to_device(dpm_prepared_list.prev);
1138
1139 get_device(dev);
1140 dev->power.is_prepared = false;
1141 list_move(&dev->power.entry, &list);
1142 mutex_unlock(&dpm_list_mtx);
1143
1144 trace_device_pm_callback_start(dev, "", state.event);
1145 device_complete(dev, state);
1146 trace_device_pm_callback_end(dev, 0);
1147
1148 mutex_lock(&dpm_list_mtx);
1149 put_device(dev);
1150 }
1151 list_splice(&list, &dpm_list);
1152 mutex_unlock(&dpm_list_mtx);
1153
1154 /* Allow device probing and trigger re-probing of deferred devices */
1155 device_unblock_probing();
1156 trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1157 }
1158
1159 /**
1160 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1161 * @state: PM transition of the system being carried out.
1162 *
1163 * Execute "resume" callbacks for all devices and complete the PM transition of
1164 * the system.
1165 */
void dpm_resume_end(pm_message_t state)
1167 {
1168 dpm_resume(state);
1169 dpm_complete(state);
1170 }
1171 EXPORT_SYMBOL_GPL(dpm_resume_end);
1172
1173
1174 /*------------------------- Suspend routines -------------------------*/
1175
1176 /**
1177 * resume_event - Return a "resume" message for given "suspend" sleep state.
1178 * @sleep_state: PM message representing a sleep state.
1179 *
1180 * Return a PM message representing the resume event corresponding to given
1181 * sleep state.
1182 */
static pm_message_t resume_event(pm_message_t sleep_state)
1184 {
1185 switch (sleep_state.event) {
1186 case PM_EVENT_SUSPEND:
1187 return PMSG_RESUME;
1188 case PM_EVENT_FREEZE:
1189 case PM_EVENT_QUIESCE:
1190 return PMSG_RECOVER;
1191 case PM_EVENT_HIBERNATE:
1192 return PMSG_RESTORE;
1193 }
1194 return PMSG_ON;
1195 }
1196
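/*
 * dpm_superior_set_must_resume - Mark the parent and all suppliers of @dev as
 * requiring resume, so they are not left in suspend during system resume.
 */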
static void dpm_superior_set_must_resume(struct device *dev)
1198 {
1199 struct device_link *link;
1200 int idx;
1201
1202 if (dev->parent)
1203 dev->parent->power.must_resume = true;
1204
1205 idx = device_links_read_lock();
1206
1207 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1208 link->supplier->power.must_resume = true;
1209
1210 device_links_read_unlock(idx);
1211 }
1212
static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
						 pm_message_t state,
						 const char **info_p)
1216 {
1217 pm_callback_t callback;
1218 const char *info;
1219
1220 if (dev->pm_domain) {
1221 info = "noirq power domain ";
1222 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1223 } else if (dev->type && dev->type->pm) {
1224 info = "noirq type ";
1225 callback = pm_noirq_op(dev->type->pm, state);
1226 } else if (dev->class && dev->class->pm) {
1227 info = "noirq class ";
1228 callback = pm_noirq_op(dev->class->pm, state);
1229 } else if (dev->bus && dev->bus->pm) {
1230 info = "noirq bus ";
1231 callback = pm_noirq_op(dev->bus->pm, state);
1232 } else {
1233 return NULL;
1234 }
1235
1236 if (info_p)
1237 *info_p = info;
1238
1239 return callback;
1240 }
1241
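/*
 * device_must_resume - Check whether @dev has to go through the resume phases
 * of the transition back to the working state.
 */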
static bool device_must_resume(struct device *dev, pm_message_t state,
			       bool no_subsys_suspend_noirq)
1244 {
1245 pm_message_t resume_msg = resume_event(state);
1246
1247 /*
1248 * If all of the device driver's "noirq", "late" and "early" callbacks
1249 * are invoked directly by the core, the decision to allow the device to
1250 * stay in suspend can be based on its current runtime PM status and its
1251 * wakeup settings.
1252 */
1253 if (no_subsys_suspend_noirq &&
1254 !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
1255 !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
1256 !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
1257 return !pm_runtime_status_suspended(dev) &&
1258 (resume_msg.event != PM_EVENT_RESUME ||
1259 (device_can_wakeup(dev) && !device_may_wakeup(dev)));
1260
1261 /*
1262 * The only safe strategy here is to require that if the device may not
1263 * be left in suspend, resume callbacks must be invoked for it.
1264 */
1265 return !dev->power.may_skip_resume;
1266 }
1267
1268 /**
1269 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1270 * @dev: Device to handle.
1271 * @state: PM transition of the system being carried out.
1272 * @async: If true, the device is being suspended asynchronously.
1273 *
1274 * The driver of @dev will not receive interrupts while this function is being
1275 * executed.
1276 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1278 {
1279 pm_callback_t callback;
1280 const char *info;
1281 bool no_subsys_cb = false;
1282 int error = 0;
1283
1284 TRACE_DEVICE(dev);
1285 TRACE_SUSPEND(0);
1286
1287 dpm_wait_for_subordinate(dev, async);
1288
1289 if (async_error)
1290 goto Complete;
1291
1292 if (dev->power.syscore || dev->power.direct_complete)
1293 goto Complete;
1294
1295 callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
1296 if (callback)
1297 goto Run;
1298
1299 no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
1300
1301 if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
1302 goto Skip;
1303
1304 if (dev->driver && dev->driver->pm) {
1305 info = "noirq driver ";
1306 callback = pm_noirq_op(dev->driver->pm, state);
1307 }
1308
1309 Run:
1310 error = dpm_run_callback(callback, dev, state, info);
1311 if (error) {
1312 async_error = error;
1313 goto Complete;
1314 }
1315
1316 Skip:
1317 dev->power.is_noirq_suspended = true;
1318
1319 if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
1320 dev->power.must_resume = dev->power.must_resume ||
1321 atomic_read(&dev->power.usage_count) > 1 ||
1322 device_must_resume(dev, state, no_subsys_cb);
1323 } else {
1324 dev->power.must_resume = true;
1325 }
1326
1327 if (dev->power.must_resume)
1328 dpm_superior_set_must_resume(dev);
1329
1330 Complete:
1331 complete_all(&dev->power.completion);
1332 TRACE_SUSPEND(error);
1333 return error;
1334 }
1335
static void async_suspend_noirq(void *data, async_cookie_t cookie)
1337 {
1338 struct device *dev = (struct device *)data;
1339 int error;
1340
1341 error = __device_suspend_noirq(dev, pm_transition, true);
1342 if (error) {
1343 dpm_save_failed_dev(dev_name(dev));
1344 pm_dev_err(dev, pm_transition, " async", error);
1345 }
1346
1347 put_device(dev);
1348 }
1349
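/*
 * device_suspend_noirq - Suspend @dev asynchronously if async PM is in effect
 * for it, or synchronously in the caller's context otherwise.
 */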
static int device_suspend_noirq(struct device *dev)
1351 {
1352 if (dpm_async_fn(dev, async_suspend_noirq))
1353 return 0;
1354
1355 return __device_suspend_noirq(dev, pm_transition, false);
1356 }
1357
static int dpm_noirq_suspend_devices(pm_message_t state)
1359 {
1360 ktime_t starttime = ktime_get();
1361 int error = 0;
1362
1363 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1364 mutex_lock(&dpm_list_mtx);
1365 pm_transition = state;
1366 async_error = 0;
1367
1368 while (!list_empty(&dpm_late_early_list)) {
1369 struct device *dev = to_device(dpm_late_early_list.prev);
1370
1371 get_device(dev);
1372 mutex_unlock(&dpm_list_mtx);
1373
1374 error = device_suspend_noirq(dev);
1375
1376 mutex_lock(&dpm_list_mtx);
1377 if (error) {
1378 pm_dev_err(dev, state, " noirq", error);
1379 dpm_save_failed_dev(dev_name(dev));
1380 put_device(dev);
1381 break;
1382 }
1383 if (!list_empty(&dev->power.entry))
1384 list_move(&dev->power.entry, &dpm_noirq_list);
1385 put_device(dev);
1386
1387 if (async_error)
1388 break;
1389 }
1390 mutex_unlock(&dpm_list_mtx);
1391 async_synchronize_full();
1392 if (!error)
1393 error = async_error;
1394
1395 if (error) {
1396 suspend_stats.failed_suspend_noirq++;
1397 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1398 }
1399 dpm_show_time(starttime, state, error, "noirq");
1400 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1401 return error;
1402 }
1403
1404 /**
1405 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1406 * @state: PM transition of the system being carried out.
1407 *
1408 * Prevent device drivers' interrupt handlers from being called and invoke
1409 * "noirq" suspend callbacks for all non-sysdev devices.
1410 */
int dpm_suspend_noirq(pm_message_t state)
1412 {
1413 int ret;
1414
1415 cpuidle_pause();
1416
1417 device_wakeup_arm_wake_irqs();
1418 suspend_device_irqs();
1419
1420 ret = dpm_noirq_suspend_devices(state);
1421 if (ret)
1422 dpm_resume_noirq(resume_event(state));
1423
1424 return ret;
1425 }
1426
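/*
 * dpm_propagate_wakeup_to_parent - If @dev is part of a wakeup path, propagate
 * that information to its parent, unless the parent ignores its children for
 * power management purposes.
 */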
static void dpm_propagate_wakeup_to_parent(struct device *dev)
1428 {
1429 struct device *parent = dev->parent;
1430
1431 if (!parent)
1432 return;
1433
1434 spin_lock_irq(&parent->power.lock);
1435
1436 if (dev->power.wakeup_path && !parent->power.ignore_children)
1437 parent->power.wakeup_path = true;
1438
1439 spin_unlock_irq(&parent->power.lock);
1440 }
1441
static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
1445 {
1446 pm_callback_t callback;
1447 const char *info;
1448
1449 if (dev->pm_domain) {
1450 info = "late power domain ";
1451 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1452 } else if (dev->type && dev->type->pm) {
1453 info = "late type ";
1454 callback = pm_late_early_op(dev->type->pm, state);
1455 } else if (dev->class && dev->class->pm) {
1456 info = "late class ";
1457 callback = pm_late_early_op(dev->class->pm, state);
1458 } else if (dev->bus && dev->bus->pm) {
1459 info = "late bus ";
1460 callback = pm_late_early_op(dev->bus->pm, state);
1461 } else {
1462 return NULL;
1463 }
1464
1465 if (info_p)
1466 *info_p = info;
1467
1468 return callback;
1469 }
1470
1471 /**
1472 * __device_suspend_late - Execute a "late suspend" callback for given device.
1473 * @dev: Device to handle.
1474 * @state: PM transition of the system being carried out.
1475 * @async: If true, the device is being suspended asynchronously.
1476 *
1477 * Runtime PM is disabled for @dev while this function is being executed.
1478 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1480 {
1481 pm_callback_t callback;
1482 const char *info;
1483 int error = 0;
1484
1485 TRACE_DEVICE(dev);
1486 TRACE_SUSPEND(0);
1487
1488 __pm_runtime_disable(dev, false);
1489
1490 dpm_wait_for_subordinate(dev, async);
1491
1492 if (async_error)
1493 goto Complete;
1494
1495 if (pm_wakeup_pending()) {
1496 async_error = -EBUSY;
1497 goto Complete;
1498 }
1499
1500 if (dev->power.syscore || dev->power.direct_complete)
1501 goto Complete;
1502
1503 callback = dpm_subsys_suspend_late_cb(dev, state, &info);
1504 if (callback)
1505 goto Run;
1506
1507 if (dev_pm_smart_suspend_and_suspended(dev) &&
1508 !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
1509 goto Skip;
1510
1511 if (dev->driver && dev->driver->pm) {
1512 info = "late driver ";
1513 callback = pm_late_early_op(dev->driver->pm, state);
1514 }
1515
1516 Run:
1517 error = dpm_run_callback(callback, dev, state, info);
1518 if (error) {
1519 async_error = error;
1520 goto Complete;
1521 }
1522 dpm_propagate_wakeup_to_parent(dev);
1523
1524 Skip:
1525 dev->power.is_late_suspended = true;
1526
1527 Complete:
1528 TRACE_SUSPEND(error);
1529 complete_all(&dev->power.completion);
1530 return error;
1531 }
1532
static void async_suspend_late(void *data, async_cookie_t cookie)
1534 {
1535 struct device *dev = (struct device *)data;
1536 int error;
1537
1538 error = __device_suspend_late(dev, pm_transition, true);
1539 if (error) {
1540 dpm_save_failed_dev(dev_name(dev));
1541 pm_dev_err(dev, pm_transition, " async", error);
1542 }
1543 put_device(dev);
1544 }
1545
static int device_suspend_late(struct device *dev)
1547 {
1548 if (dpm_async_fn(dev, async_suspend_late))
1549 return 0;
1550
1551 return __device_suspend_late(dev, pm_transition, false);
1552 }
1553
1554 /**
1555 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1556 * @state: PM transition of the system being carried out.
1557 */
int dpm_suspend_late(pm_message_t state)
1559 {
1560 ktime_t starttime = ktime_get();
1561 int error = 0;
1562
1563 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1564 mutex_lock(&dpm_list_mtx);
1565 pm_transition = state;
1566 async_error = 0;
1567
1568 while (!list_empty(&dpm_suspended_list)) {
1569 struct device *dev = to_device(dpm_suspended_list.prev);
1570
1571 get_device(dev);
1572 mutex_unlock(&dpm_list_mtx);
1573
1574 error = device_suspend_late(dev);
1575
1576 mutex_lock(&dpm_list_mtx);
1577 if (!list_empty(&dev->power.entry))
1578 list_move(&dev->power.entry, &dpm_late_early_list);
1579
1580 if (error) {
1581 pm_dev_err(dev, state, " late", error);
1582 dpm_save_failed_dev(dev_name(dev));
1583 put_device(dev);
1584 break;
1585 }
1586 put_device(dev);
1587
1588 if (async_error)
1589 break;
1590 }
1591 mutex_unlock(&dpm_list_mtx);
1592 async_synchronize_full();
1593 if (!error)
1594 error = async_error;
1595 if (error) {
1596 suspend_stats.failed_suspend_late++;
1597 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1598 dpm_resume_early(resume_event(state));
1599 }
1600 dpm_show_time(starttime, state, error, "late");
1601 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1602 return error;
1603 }
1604
1605 /**
1606 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1607 * @state: PM transition of the system being carried out.
1608 */
int dpm_suspend_end(pm_message_t state)
1610 {
1611 ktime_t starttime = ktime_get();
1612 int error;
1613
1614 error = dpm_suspend_late(state);
1615 if (error)
1616 goto out;
1617
1618 error = dpm_suspend_noirq(state);
1619 if (error)
1620 dpm_resume_early(resume_event(state));
1621
1622 out:
1623 dpm_show_time(starttime, state, error, "end");
1624 return error;
1625 }
1626 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1627
1628 /**
1629 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1630 * @dev: Device to suspend.
1631 * @state: PM transition of the system being carried out.
1632 * @cb: Suspend callback to execute.
1633 * @info: string description of caller.
1634 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
1638 {
1639 int error;
1640 ktime_t calltime;
1641
1642 calltime = initcall_debug_start(dev, cb);
1643
1644 trace_device_pm_callback_start(dev, info, state.event);
1645 error = cb(dev, state);
1646 trace_device_pm_callback_end(dev, error);
1647 suspend_report_result(cb, error);
1648
1649 initcall_debug_report(dev, calltime, cb, error);
1650
1651 return error;
1652 }
1653
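/*
 * dpm_clear_superiors_direct_complete - Clear the direct_complete flag of the
 * parent and all suppliers of @dev, so they are not skipped by the
 * direct-complete optimization.
 */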
static void dpm_clear_superiors_direct_complete(struct device *dev)
1655 {
1656 struct device_link *link;
1657 int idx;
1658
1659 if (dev->parent) {
1660 spin_lock_irq(&dev->parent->power.lock);
1661 dev->parent->power.direct_complete = false;
1662 spin_unlock_irq(&dev->parent->power.lock);
1663 }
1664
1665 idx = device_links_read_lock();
1666
1667 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
1668 spin_lock_irq(&link->supplier->power.lock);
1669 link->supplier->power.direct_complete = false;
1670 spin_unlock_irq(&link->supplier->power.lock);
1671 }
1672
1673 device_links_read_unlock(idx);
1674 }
1675
1676 /**
1677 * __device_suspend - Execute "suspend" callbacks for given device.
1678 * @dev: Device to handle.
1679 * @state: PM transition of the system being carried out.
1680 * @async: If true, the device is being suspended asynchronously.
1681 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1683 {
1684 pm_callback_t callback = NULL;
1685 const char *info = NULL;
1686 int error = 0;
1687 char suspend_abort[MAX_SUSPEND_ABORT_LEN];
1688 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1689
1690 TRACE_DEVICE(dev);
1691 TRACE_SUSPEND(0);
1692
1693 dpm_wait_for_subordinate(dev, async);
1694
1695 if (async_error) {
1696 dev->power.direct_complete = false;
1697 goto Complete;
1698 }
1699
1700 /*
1701 * If a device configured to wake up the system from sleep states
1702 * has been suspended at run time and there's a resume request pending
1703 * for it, this is equivalent to the device signaling wakeup, so the
1704 * system suspend operation should be aborted.
1705 */
1706 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1707 pm_wakeup_event(dev, 0);
1708
1709 if (pm_wakeup_pending()) {
1710 dev->power.direct_complete = false;
1711 pm_get_active_wakeup_sources(suspend_abort,
1712 MAX_SUSPEND_ABORT_LEN);
1713 log_suspend_abort_reason(suspend_abort);
1714 async_error = -EBUSY;
1715 goto Complete;
1716 }
1717
1718 if (dev->power.syscore)
1719 goto Complete;
1720
1721 /* Avoid direct_complete to let wakeup_path propagate. */
1722 if (device_may_wakeup(dev) || dev->power.wakeup_path)
1723 dev->power.direct_complete = false;
1724
1725 if (dev->power.direct_complete) {
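		/*
		 * Only use direct-complete if the device is runtime-suspended and
		 * remains so after runtime PM has been disabled (re-checked below).
		 */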
1726 if (pm_runtime_status_suspended(dev)) {
1727 pm_runtime_disable(dev);
1728 if (pm_runtime_status_suspended(dev)) {
1729 pm_dev_dbg(dev, state, "direct-complete ");
1730 goto Complete;
1731 }
1732
1733 pm_runtime_enable(dev);
1734 }
1735 dev->power.direct_complete = false;
1736 }
1737
1738 dev->power.may_skip_resume = false;
1739 dev->power.must_resume = false;
1740
1741 dpm_watchdog_set(&wd, dev);
1742 device_lock(dev);
1743
1744 if (dev->pm_domain) {
1745 info = "power domain ";
1746 callback = pm_op(&dev->pm_domain->ops, state);
1747 goto Run;
1748 }
1749
1750 if (dev->type && dev->type->pm) {
1751 info = "type ";
1752 callback = pm_op(dev->type->pm, state);
1753 goto Run;
1754 }
1755
1756 if (dev->class && dev->class->pm) {
1757 info = "class ";
1758 callback = pm_op(dev->class->pm, state);
1759 goto Run;
1760 }
1761
1762 if (dev->bus) {
1763 if (dev->bus->pm) {
1764 info = "bus ";
1765 callback = pm_op(dev->bus->pm, state);
1766 } else if (dev->bus->suspend) {
1767 pm_dev_dbg(dev, state, "legacy bus ");
1768 error = legacy_suspend(dev, state, dev->bus->suspend,
1769 "legacy bus ");
1770 goto End;
1771 }
1772 }
1773
1774 Run:
1775 if (!callback && dev->driver && dev->driver->pm) {
1776 info = "driver ";
1777 callback = pm_op(dev->driver->pm, state);
1778 }
1779
1780 error = dpm_run_callback(callback, dev, state, info);
1781
1782 End:
1783 if (!error) {
1784 dev->power.is_suspended = true;
1785 if (device_may_wakeup(dev))
1786 dev->power.wakeup_path = true;
1787
1788 dpm_propagate_wakeup_to_parent(dev);
1789 dpm_clear_superiors_direct_complete(dev);
1790 }
1791
1792 device_unlock(dev);
1793 dpm_watchdog_clear(&wd);
1794
1795 Complete:
1796 if (error)
1797 async_error = error;
1798
1799 complete_all(&dev->power.completion);
1800 TRACE_SUSPEND(error);
1801 return error;
1802 }
1803
static void async_suspend(void *data, async_cookie_t cookie)
1805 {
1806 struct device *dev = (struct device *)data;
1807 int error;
1808
1809 error = __device_suspend(dev, pm_transition, true);
1810 if (error) {
1811 dpm_save_failed_dev(dev_name(dev));
1812 pm_dev_err(dev, pm_transition, " async", error);
1813 }
1814
1815 put_device(dev);
1816 }
1817
static int device_suspend(struct device *dev)
1819 {
1820 if (dpm_async_fn(dev, async_suspend))
1821 return 0;
1822
1823 return __device_suspend(dev, pm_transition, false);
1824 }
1825
1826 /**
1827 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1828 * @state: PM transition of the system being carried out.
1829 */
int dpm_suspend(pm_message_t state)
1831 {
1832 ktime_t starttime = ktime_get();
1833 int error = 0;
1834
1835 trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1836 might_sleep();
1837
1838 devfreq_suspend();
1839 cpufreq_suspend();
1840
1841 mutex_lock(&dpm_list_mtx);
1842 pm_transition = state;
1843 async_error = 0;
1844 while (!list_empty(&dpm_prepared_list)) {
1845 struct device *dev = to_device(dpm_prepared_list.prev);
1846
1847 get_device(dev);
1848 mutex_unlock(&dpm_list_mtx);
1849
1850 error = device_suspend(dev);
1851
1852 mutex_lock(&dpm_list_mtx);
1853 if (error) {
1854 pm_dev_err(dev, state, "", error);
1855 dpm_save_failed_dev(dev_name(dev));
1856 put_device(dev);
1857 break;
1858 }
1859 if (!list_empty(&dev->power.entry))
1860 list_move(&dev->power.entry, &dpm_suspended_list);
1861 put_device(dev);
1862 if (async_error)
1863 break;
1864 }
1865 mutex_unlock(&dpm_list_mtx);
1866 async_synchronize_full();
1867 if (!error)
1868 error = async_error;
1869 if (error) {
1870 suspend_stats.failed_suspend++;
1871 dpm_save_failed_step(SUSPEND_SUSPEND);
1872 }
1873 dpm_show_time(starttime, state, error, NULL);
1874 trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1875 return error;
1876 }
1877
1878 /**
1879 * device_prepare - Prepare a device for system power transition.
1880 * @dev: Device to handle.
1881 * @state: PM transition of the system being carried out.
1882 *
1883	 * Execute the ->prepare() callback(s) for the given device. No new children of the
1884 * device may be registered after this function has returned.
1885 */
1886 static int device_prepare(struct device *dev, pm_message_t state)
1887 {
1888 int (*callback)(struct device *) = NULL;
1889 int ret = 0;
1890
1891 if (dev->power.syscore)
1892 return 0;
1893
1894 WARN_ON(!pm_runtime_enabled(dev) &&
1895 dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
1896 DPM_FLAG_LEAVE_SUSPENDED));
1897
1898 /*
1899 * If a device's parent goes into runtime suspend at the wrong time,
1900 * it won't be possible to resume the device. To prevent this we
1901 * block runtime suspend here, during the prepare phase, and allow
1902 * it again during the complete phase.
1903 */
1904 pm_runtime_get_noresume(dev);
1905
1906 device_lock(dev);
1907
1908 dev->power.wakeup_path = false;
1909
1910 if (dev->power.no_pm_callbacks)
1911 goto unlock;
1912
1913 if (dev->pm_domain)
1914 callback = dev->pm_domain->ops.prepare;
1915 else if (dev->type && dev->type->pm)
1916 callback = dev->type->pm->prepare;
1917 else if (dev->class && dev->class->pm)
1918 callback = dev->class->pm->prepare;
1919 else if (dev->bus && dev->bus->pm)
1920 callback = dev->bus->pm->prepare;
1921
1922 if (!callback && dev->driver && dev->driver->pm)
1923 callback = dev->driver->pm->prepare;
1924
1925 if (callback)
1926 ret = callback(dev);
1927
1928 unlock:
1929 device_unlock(dev);
1930
1931 if (ret < 0) {
1932 suspend_report_result(callback, ret);
1933 pm_runtime_put(dev);
1934 return ret;
1935 }
1936 /*
1937 * A positive return value from ->prepare() means "this device appears
1938 * to be runtime-suspended and its state is fine, so if it really is
1939 * runtime-suspended, you can leave it in that state provided that you
1940 * will do the same thing with all of its descendants". This only
1941 * applies to suspend transitions, however.
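	 * Devices with no PM callbacks at all are treated the same way, and
	 * setting DPM_FLAG_NEVER_SKIP in the driver flags opts a device out
	 * of this "direct complete" optimization (see the condition below).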
1942 */
1943 spin_lock_irq(&dev->power.lock);
1944 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1945 ((pm_runtime_suspended(dev) && ret > 0) ||
1946 dev->power.no_pm_callbacks) &&
1947 !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
1948 spin_unlock_irq(&dev->power.lock);
1949 return 0;
1950 }
1951
1952 /**
1953 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1954 * @state: PM transition of the system being carried out.
1955 *
1956 * Execute the ->prepare() callback(s) for all devices.
1957 */
1958 int dpm_prepare(pm_message_t state)
1959 {
1960 int error = 0;
1961
1962 trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1963 might_sleep();
1964
1965 /*
1966	 * Give the known devices a chance to complete their probes before
1967	 * device probing is disabled below. This synchronization point matters
1968	 * at least at boot time and during hibernation restore.
1969 */
1970 wait_for_device_probe();
1971 /*
1972	 * It is unsafe to allow devices to be probed during suspend or
1973	 * hibernation, as system behavior would be unpredictable in that case.
1974	 * So prohibit device probing here and defer any probes instead.
1975	 * The normal behavior will be restored in dpm_complete().
1976 */
1977 device_block_probing();
1978
1979 mutex_lock(&dpm_list_mtx);
1980 while (!list_empty(&dpm_list)) {
1981 struct device *dev = to_device(dpm_list.next);
1982
1983 get_device(dev);
1984 mutex_unlock(&dpm_list_mtx);
1985
1986 trace_device_pm_callback_start(dev, "", state.event);
1987 error = device_prepare(dev, state);
1988 trace_device_pm_callback_end(dev, error);
1989
1990 mutex_lock(&dpm_list_mtx);
1991 if (error) {
1992 if (error == -EAGAIN) {
1993 put_device(dev);
1994 error = 0;
1995 continue;
1996 }
1997 pr_info("Device %s not prepared for power transition: code %d\n",
1998 dev_name(dev), error);
1999 dpm_save_failed_dev(dev_name(dev));
2000 put_device(dev);
2001 break;
2002 }
2003 dev->power.is_prepared = true;
2004 if (!list_empty(&dev->power.entry))
2005 list_move_tail(&dev->power.entry, &dpm_prepared_list);
2006 put_device(dev);
2007 }
2008 mutex_unlock(&dpm_list_mtx);
2009 trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2010 return error;
2011 }
2012
2013 /**
2014 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2015 * @state: PM transition of the system being carried out.
2016 *
2017 * Prepare all non-sysdev devices for a system PM transition and execute "suspend"
2018 * callbacks for them.
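 *
 * Return 0 on success or a negative error code otherwise.  On failure the
 * caller is expected to roll devices back, typically with dpm_resume_end().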
2019 */
2020 int dpm_suspend_start(pm_message_t state)
2021 {
2022 ktime_t starttime = ktime_get();
2023 int error;
2024
2025 error = dpm_prepare(state);
2026 if (error) {
2027 suspend_stats.failed_prepare++;
2028 dpm_save_failed_step(SUSPEND_PREPARE);
2029 } else
2030 error = dpm_suspend(state);
2031 dpm_show_time(starttime, state, error, "start");
2032 return error;
2033 }
2034 EXPORT_SYMBOL_GPL(dpm_suspend_start);
2035
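/*
 * __suspend_report_result - Report a failing PM callback.
 *
 * Usually invoked through the suspend_report_result() macro, which supplies
 * the caller's function name, so the message identifies both the caller and
 * the callback (via %pS) that returned a nonzero @ret.
 */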
2036 void __suspend_report_result(const char *function, void *fn, int ret)
2037 {
2038 if (ret)
2039 pr_err("%s(): %pS returns %d\n", function, fn, ret);
2040 }
2041 EXPORT_SYMBOL_GPL(__suspend_report_result);
2042
2043 /**
2044 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2045 * @subordinate: Device that needs to wait for @dev.
2046 * @dev: Device to wait for.
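 *
 * Lets @subordinate wait until the ongoing suspend or resume of @dev has
 * completed, which is useful for dependencies that are not reflected in the
 * device hierarchy.  The return value is the current async_error, so callers
 * can tell whether an asynchronous suspend has already failed.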
2047 */
2048 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2049 {
2050 dpm_wait(dev, subordinate->power.async_suspend);
2051 return async_error;
2052 }
2053 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
2054
2055 /**
2056 * dpm_for_each_dev - device iterator.
2057 * @data: data for the callback.
2058 * @fn: function to be called for each device.
2059 *
2060 * Iterate over devices in dpm_list, and call @fn for each device,
2061 * passing it @data.
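 *
 * Illustrative usage sketch (count_dev is a hypothetical callback, not part
 * of this file):
 *
 *	static void count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	int count = 0;
 *	dpm_for_each_dev(&count, count_dev);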
2062 */
2063 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2064 {
2065 struct device *dev;
2066
2067 if (!fn)
2068 return;
2069
2070 device_pm_lock();
2071 list_for_each_entry(dev, &dpm_list, power.entry)
2072 fn(dev, data);
2073 device_pm_unlock();
2074 }
2075 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
2076
2077 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2078 {
2079 if (!ops)
2080 return true;
2081
2082 return !ops->prepare &&
2083 !ops->suspend &&
2084 !ops->suspend_late &&
2085 !ops->suspend_noirq &&
2086 !ops->resume_noirq &&
2087 !ops->resume_early &&
2088 !ops->resume &&
2089 !ops->complete;
2090 }
2091
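/*
 * device_pm_check_callbacks - Check whether @dev has any PM callbacks at all.
 *
 * The result is cached in dev->power.no_pm_callbacks, which lets the system
 * suspend code skip callback handling (and qualify the device for "direct
 * complete") when the bus, class, type, PM domain and driver provide none.
 */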
2092 void device_pm_check_callbacks(struct device *dev)
2093 {
2094 spin_lock_irq(&dev->power.lock);
2095 dev->power.no_pm_callbacks =
2096 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2097 !dev->bus->suspend && !dev->bus->resume)) &&
2098 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2099 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2100 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2101 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2102 !dev->driver->suspend && !dev->driver->resume));
2103 spin_unlock_irq(&dev->power.lock);
2104 }
2105
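/*
 * dev_pm_smart_suspend_and_suspended - Check "smart suspend" status of @dev.
 *
 * Return true if the device's driver has set DPM_FLAG_SMART_SUSPEND and
 * runtime PM reports the device as suspended, in which case middle-layer
 * code may decide to skip some of its system-wide suspend callbacks.
 */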
2106 bool dev_pm_smart_suspend_and_suspended(struct device *dev)
2107 {
2108 return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2109 pm_runtime_status_suspended(dev);
2110 }
2111