• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17 
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20 
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37 #include <linux/wakeup_reason.h>
38 
39 #include "../base.h"
40 #include "power.h"
41 
/* Common signature of all device PM callbacks invoked by this file. */
typedef int (*pm_callback_t)(struct device *);

/*
 * Walk a device-link list under RCU, asserting that the device-links read
 * lock is held (as required by list_for_each_entry_rcu()'s lockdep check).
 */
#define list_for_each_entry_rcu_locked(pos, head, member) \
	list_for_each_entry_rcu(pos, head, member, \
			device_links_read_lock_held())
47 
48 /*
49  * The entries in the dpm_list list are in a depth first order, simply
50  * because children are guaranteed to be discovered after parents, and
51  * are inserted at the back of the list on discovery.
52  *
53  * Since device_pm_add() may be called with a device lock held,
54  * we must never try to acquire a device lock while holding
55  * dpm_list_mutex.
56  */
57 
/*
 * Devices move between these lists as they progress through the phases of a
 * system transition; all of them are protected by dpm_list_mtx.
 */
LIST_HEAD(dpm_list);			/* All PM-managed devices. */
static LIST_HEAD(dpm_prepared_list);	/* Devices that completed ->prepare(). */
static LIST_HEAD(dpm_suspended_list);	/* Devices that completed ->suspend(). */
static LIST_HEAD(dpm_late_early_list);	/* Devices that completed ->suspend_late(). */
static LIST_HEAD(dpm_noirq_list);	/* Devices that completed ->suspend_noirq(). */

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
/* The transition currently under way; read by the async callbacks. */
static pm_message_t pm_transition;

/* First error reported by an async suspend callback (0 if none). */
static int async_error;
69 
pm_verb(int event)70 static const char *pm_verb(int event)
71 {
72 	switch (event) {
73 	case PM_EVENT_SUSPEND:
74 		return "suspend";
75 	case PM_EVENT_RESUME:
76 		return "resume";
77 	case PM_EVENT_FREEZE:
78 		return "freeze";
79 	case PM_EVENT_QUIESCE:
80 		return "quiesce";
81 	case PM_EVENT_HIBERNATE:
82 		return "hibernate";
83 	case PM_EVENT_THAW:
84 		return "thaw";
85 	case PM_EVENT_RESTORE:
86 		return "restore";
87 	case PM_EVENT_RECOVER:
88 		return "recover";
89 	default:
90 		return "(unknown PM event)";
91 	}
92 }
93 
94 /**
95  * device_pm_sleep_init - Initialize system suspend-related device fields.
96  * @dev: Device object being initialized.
97  */
device_pm_sleep_init(struct device * dev)98 void device_pm_sleep_init(struct device *dev)
99 {
100 	dev->power.is_prepared = false;
101 	dev->power.is_suspended = false;
102 	dev->power.is_noirq_suspended = false;
103 	dev->power.is_late_suspended = false;
104 	init_completion(&dev->power.completion);
105 	complete_all(&dev->power.completion);
106 	dev->power.wakeup = NULL;
107 	INIT_LIST_HEAD(&dev->power.entry);
108 }
109 
110 /**
111  * device_pm_lock - Lock the list of active devices used by the PM core.
112  */
device_pm_lock(void)113 void device_pm_lock(void)
114 {
115 	mutex_lock(&dpm_list_mtx);
116 }
117 
118 /**
119  * device_pm_unlock - Unlock the list of active devices used by the PM core.
120  */
device_pm_unlock(void)121 void device_pm_unlock(void)
122 {
123 	mutex_unlock(&dpm_list_mtx);
124 }
125 
126 /**
127  * device_pm_add - Add a device to the PM core's list of active devices.
128  * @dev: Device to add to the list.
129  */
device_pm_add(struct device * dev)130 void device_pm_add(struct device *dev)
131 {
132 	/* Skip PM setup/initialization. */
133 	if (device_pm_not_required(dev))
134 		return;
135 
136 	pr_debug("Adding info for %s:%s\n",
137 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
138 	device_pm_check_callbacks(dev);
139 	mutex_lock(&dpm_list_mtx);
140 	if (dev->parent && dev->parent->power.is_prepared)
141 		dev_warn(dev, "parent %s should not be sleeping\n",
142 			dev_name(dev->parent));
143 	list_add_tail(&dev->power.entry, &dpm_list);
144 	dev->power.in_dpm_list = true;
145 	mutex_unlock(&dpm_list_mtx);
146 }
147 
148 /**
149  * device_pm_remove - Remove a device from the PM core's list of active devices.
150  * @dev: Device to be removed from the list.
151  */
device_pm_remove(struct device * dev)152 void device_pm_remove(struct device *dev)
153 {
154 	if (device_pm_not_required(dev))
155 		return;
156 
157 	pr_debug("Removing info for %s:%s\n",
158 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
159 	complete_all(&dev->power.completion);
160 	mutex_lock(&dpm_list_mtx);
161 	list_del_init(&dev->power.entry);
162 	dev->power.in_dpm_list = false;
163 	mutex_unlock(&dpm_list_mtx);
164 	device_wakeup_disable(dev);
165 	pm_runtime_remove(dev);
166 	device_pm_check_callbacks(dev);
167 }
168 
169 /**
170  * device_pm_move_before - Move device in the PM core's list of active devices.
171  * @deva: Device to move in dpm_list.
172  * @devb: Device @deva should come before.
173  */
device_pm_move_before(struct device * deva,struct device * devb)174 void device_pm_move_before(struct device *deva, struct device *devb)
175 {
176 	pr_debug("Moving %s:%s before %s:%s\n",
177 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
178 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
179 	/* Delete deva from dpm_list and reinsert before devb. */
180 	list_move_tail(&deva->power.entry, &devb->power.entry);
181 }
182 
183 /**
184  * device_pm_move_after - Move device in the PM core's list of active devices.
185  * @deva: Device to move in dpm_list.
186  * @devb: Device @deva should come after.
187  */
device_pm_move_after(struct device * deva,struct device * devb)188 void device_pm_move_after(struct device *deva, struct device *devb)
189 {
190 	pr_debug("Moving %s:%s after %s:%s\n",
191 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
192 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
193 	/* Delete deva from dpm_list and reinsert after devb. */
194 	list_move(&deva->power.entry, &devb->power.entry);
195 }
196 
197 /**
198  * device_pm_move_last - Move device to end of the PM core's list of devices.
199  * @dev: Device to move in dpm_list.
200  */
device_pm_move_last(struct device * dev)201 void device_pm_move_last(struct device *dev)
202 {
203 	pr_debug("Moving %s:%s to end of list\n",
204 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
205 	list_move_tail(&dev->power.entry, &dpm_list);
206 }
207 
initcall_debug_start(struct device * dev,void * cb)208 static ktime_t initcall_debug_start(struct device *dev, void *cb)
209 {
210 	if (!pm_print_times_enabled)
211 		return 0;
212 
213 	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
214 		 task_pid_nr(current),
215 		 dev->parent ? dev_name(dev->parent) : "none");
216 	return ktime_get();
217 }
218 
/*
 * initcall_debug_report - Log a PM callback's result and elapsed time.
 * @dev: Device whose callback just returned.
 * @calltime: Timestamp taken by initcall_debug_start().
 * @cb: The callback (printed symbolically).
 * @error: The callback's return value.
 *
 * No-op unless pm_print_times is enabled.
 */
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	s64 delta_us;

	if (!pm_print_times_enabled)
		return;

	delta_us = ktime_us_delta(ktime_get(), calltime);
	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)delta_us);
}
231 
232 /**
233  * dpm_wait - Wait for a PM operation to complete.
234  * @dev: Device to wait for.
235  * @async: If unset, wait only if the device's power.async_suspend flag is set.
236  */
dpm_wait(struct device * dev,bool async)237 static void dpm_wait(struct device *dev, bool async)
238 {
239 	if (!dev)
240 		return;
241 
242 	if (async || (pm_async_enabled && dev->power.async_suspend))
243 		wait_for_completion(&dev->power.completion);
244 }
245 
dpm_wait_fn(struct device * dev,void * async_ptr)246 static int dpm_wait_fn(struct device *dev, void *async_ptr)
247 {
248 	dpm_wait(dev, *((bool *)async_ptr));
249 	return 0;
250 }
251 
/*
 * dpm_wait_for_children - Wait for all children of a device to complete
 * their in-flight PM transitions.
 * @dev: Device whose children to wait for.
 * @async: Forwarded to dpm_wait() for each child.
 */
static void dpm_wait_for_children(struct device *dev, bool async)
{
	/* Fixed: body was indented with spaces instead of a tab. */
	device_for_each_child(dev, &async, dpm_wait_fn);
}
256 
/*
 * dpm_wait_for_suppliers - Wait for all of @dev's suppliers to complete
 * their in-flight PM transitions.
 * @dev: Consumer device.
 * @async: Forwarded to dpm_wait() for each supplier.
 */
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}
277 
/*
 * dpm_wait_for_superior - Wait for the parent and suppliers of @dev to
 * complete their in-flight PM transitions.
 * @dev: Device whose "superiors" to wait for.
 * @async: Forwarded to dpm_wait() for the parent and each supplier.
 *
 * Returns false if @dev was deleted in the meantime (so the caller must not
 * touch it any further), true otherwise.
 */
static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}
311 
/*
 * dpm_wait_for_consumers - Wait for all of @dev's consumers to complete
 * their in-flight PM transitions.
 * @dev: Supplier device.
 * @async: Forwarded to dpm_wait() for each consumer.
 */
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}
334 
/*
 * dpm_wait_for_subordinate - Wait for all devices that depend on @dev
 * (children and consumers) to complete their in-flight PM transitions.
 * @dev: Device whose dependents to wait for.
 * @async: Forwarded to dpm_wait() for each dependent.
 */
static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}
340 
341 /**
342  * pm_op - Return the PM operation appropriate for given PM event.
343  * @ops: PM operations to choose from.
344  * @state: PM transition of the system being carried out.
345  */
pm_op(const struct dev_pm_ops * ops,pm_message_t state)346 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
347 {
348 	switch (state.event) {
349 #ifdef CONFIG_SUSPEND
350 	case PM_EVENT_SUSPEND:
351 		return ops->suspend;
352 	case PM_EVENT_RESUME:
353 		return ops->resume;
354 #endif /* CONFIG_SUSPEND */
355 #ifdef CONFIG_HIBERNATE_CALLBACKS
356 	case PM_EVENT_FREEZE:
357 	case PM_EVENT_QUIESCE:
358 		return ops->freeze;
359 	case PM_EVENT_HIBERNATE:
360 		return ops->poweroff;
361 	case PM_EVENT_THAW:
362 	case PM_EVENT_RECOVER:
363 		return ops->thaw;
364 	case PM_EVENT_RESTORE:
365 		return ops->restore;
366 #endif /* CONFIG_HIBERNATE_CALLBACKS */
367 	}
368 
369 	return NULL;
370 }
371 
372 /**
373  * pm_late_early_op - Return the PM operation appropriate for given PM event.
374  * @ops: PM operations to choose from.
375  * @state: PM transition of the system being carried out.
376  *
377  * Runtime PM is disabled for @dev while this function is being executed.
378  */
pm_late_early_op(const struct dev_pm_ops * ops,pm_message_t state)379 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
380 				      pm_message_t state)
381 {
382 	switch (state.event) {
383 #ifdef CONFIG_SUSPEND
384 	case PM_EVENT_SUSPEND:
385 		return ops->suspend_late;
386 	case PM_EVENT_RESUME:
387 		return ops->resume_early;
388 #endif /* CONFIG_SUSPEND */
389 #ifdef CONFIG_HIBERNATE_CALLBACKS
390 	case PM_EVENT_FREEZE:
391 	case PM_EVENT_QUIESCE:
392 		return ops->freeze_late;
393 	case PM_EVENT_HIBERNATE:
394 		return ops->poweroff_late;
395 	case PM_EVENT_THAW:
396 	case PM_EVENT_RECOVER:
397 		return ops->thaw_early;
398 	case PM_EVENT_RESTORE:
399 		return ops->restore_early;
400 #endif /* CONFIG_HIBERNATE_CALLBACKS */
401 	}
402 
403 	return NULL;
404 }
405 
406 /**
407  * pm_noirq_op - Return the PM operation appropriate for given PM event.
408  * @ops: PM operations to choose from.
409  * @state: PM transition of the system being carried out.
410  *
411  * The driver of @dev will not receive interrupts while this function is being
412  * executed.
413  */
pm_noirq_op(const struct dev_pm_ops * ops,pm_message_t state)414 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
415 {
416 	switch (state.event) {
417 #ifdef CONFIG_SUSPEND
418 	case PM_EVENT_SUSPEND:
419 		return ops->suspend_noirq;
420 	case PM_EVENT_RESUME:
421 		return ops->resume_noirq;
422 #endif /* CONFIG_SUSPEND */
423 #ifdef CONFIG_HIBERNATE_CALLBACKS
424 	case PM_EVENT_FREEZE:
425 	case PM_EVENT_QUIESCE:
426 		return ops->freeze_noirq;
427 	case PM_EVENT_HIBERNATE:
428 		return ops->poweroff_noirq;
429 	case PM_EVENT_THAW:
430 	case PM_EVENT_RECOVER:
431 		return ops->thaw_noirq;
432 	case PM_EVENT_RESTORE:
433 		return ops->restore_noirq;
434 #endif /* CONFIG_HIBERNATE_CALLBACKS */
435 	}
436 
437 	return NULL;
438 }
439 
/*
 * pm_dev_dbg - Emit a debug message about a device PM callback invocation.
 * @dev: Device the callback runs for.
 * @state: Current PM transition (its verb is printed).
 * @info: Prefix identifying the callback source (e.g. "noirq bus ").
 */
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "", dev->power.driver_flags);
}
446 
/*
 * pm_dev_err - Report a device PM callback failure.
 * @dev: Device whose callback failed.
 * @state: Current PM transition (its verb is printed).
 * @info: Suffix identifying the failing phase (e.g. " async noirq").
 * @error: The callback's error code.
 */
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
			int error)
{
	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
		error);
}
453 
/*
 * dpm_show_time - Log how long a whole PM phase took.
 * @starttime: Timestamp taken when the phase began.
 * @state: Current PM transition (its verb is printed).
 * @error: Non-zero if the phase was aborted.
 * @info: Optional phase name (e.g. "noirq", "early"), or NULL.
 */
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	/* Round sub-microsecond durations up so the log never shows 0.000. */
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
473 
/*
 * dpm_run_callback - Invoke a device PM callback with tracing and reporting.
 * @cb: Callback to run (NULL is allowed and treated as success).
 * @dev: Device to pass to the callback.
 * @state: Current PM transition (for debug output).
 * @info: Callback-source prefix for debug output.
 *
 * Returns the callback's return value (0 if @cb is NULL).
 */
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	/* Record the failure location for /sys/power debugging. */
	suspend_report_result(dev, cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}
495 
496 #ifdef CONFIG_DPM_WATCHDOG
/* Per-callback watchdog that panics if a device's PM callback hangs. */
struct dpm_watchdog {
	struct device		*dev;	/* Device whose callback is running. */
	struct task_struct	*tsk;	/* Task running the callback. */
	struct timer_list	timer;	/* Timeout timer (lives on the stack). */
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd
505 
506 /**
507  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
508  * @t: The timer that PM watchdog depends on.
509  *
510  * Called when a driver has timed out suspending or resuming.
511  * There's not much we can do here to recover so panic() to
512  * capture a crash-dump in pstore.
513  */
dpm_watchdog_handler(struct timer_list * t)514 static void dpm_watchdog_handler(struct timer_list *t)
515 {
516 	struct dpm_watchdog *wd = from_timer(wd, t, timer);
517 
518 	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
519 	show_stack(wd->tsk, NULL, KERN_EMERG);
520 	panic("%s %s: unrecoverable failure\n",
521 		dev_driver_string(wd->dev), dev_name(wd->dev));
522 }
523 
524 /**
525  * dpm_watchdog_set - Enable pm watchdog for given device.
526  * @wd: Watchdog. Must be allocated on the stack.
527  * @dev: Device to handle.
528  */
dpm_watchdog_set(struct dpm_watchdog * wd,struct device * dev)529 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
530 {
531 	struct timer_list *timer = &wd->timer;
532 
533 	wd->dev = dev;
534 	wd->tsk = current;
535 
536 	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
537 	/* use same timeout value for both suspend and resume */
538 	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
539 	add_timer(timer);
540 }
541 
542 /**
543  * dpm_watchdog_clear - Disable suspend/resume watchdog.
544  * @wd: Watchdog to disable.
545  */
dpm_watchdog_clear(struct dpm_watchdog * wd)546 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
547 {
548 	struct timer_list *timer = &wd->timer;
549 
550 	del_timer_sync(timer);
551 	destroy_timer_on_stack(timer);
552 }
553 #else
554 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
555 #define dpm_watchdog_set(x, y)
556 #define dpm_watchdog_clear(x)
557 #endif
558 
559 /*------------------------- Resume routines -------------------------*/
560 
561 /**
562  * dev_pm_skip_resume - System-wide device resume optimization check.
563  * @dev: Target device.
564  *
565  * Return:
566  * - %false if the transition under way is RESTORE.
567  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
568  * - The logical negation of %power.must_resume otherwise (that is, when the
569  *   transition under way is RESUME).
570  */
dev_pm_skip_resume(struct device * dev)571 bool dev_pm_skip_resume(struct device *dev)
572 {
573 	if (pm_transition.event == PM_EVENT_RESTORE)
574 		return false;
575 
576 	if (pm_transition.event == PM_EVENT_THAW)
577 		return dev_pm_skip_suspend(dev);
578 
579 	return !dev->power.must_resume;
580 }
581 
582 /**
583  * __device_resume_noirq - Execute a "noirq resume" callback for given device.
584  * @dev: Device to handle.
585  * @state: PM transition of the system being carried out.
586  * @async: If true, the device is being resumed asynchronously.
587  *
588  * The driver of @dev will not receive interrupts while this function is being
589  * executed.
590  */
__device_resume_noirq(struct device * dev,pm_message_t state,bool async)591 static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
592 {
593 	pm_callback_t callback = NULL;
594 	const char *info = NULL;
595 	bool skip_resume;
596 	int error = 0;
597 
598 	TRACE_DEVICE(dev);
599 	TRACE_RESUME(0);
600 
601 	if (dev->power.syscore || dev->power.direct_complete)
602 		goto Out;
603 
604 	if (!dev->power.is_noirq_suspended)
605 		goto Out;
606 
607 	if (!dpm_wait_for_superior(dev, async))
608 		goto Out;
609 
610 	skip_resume = dev_pm_skip_resume(dev);
611 	/*
612 	 * If the driver callback is skipped below or by the middle layer
613 	 * callback and device_resume_early() also skips the driver callback for
614 	 * this device later, it needs to appear as "suspended" to PM-runtime,
615 	 * so change its status accordingly.
616 	 *
617 	 * Otherwise, the device is going to be resumed, so set its PM-runtime
618 	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
619 	 * to avoid confusing drivers that don't use it.
620 	 */
621 	if (skip_resume)
622 		pm_runtime_set_suspended(dev);
623 	else if (dev_pm_skip_suspend(dev))
624 		pm_runtime_set_active(dev);
625 
626 	if (dev->pm_domain) {
627 		info = "noirq power domain ";
628 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
629 	} else if (dev->type && dev->type->pm) {
630 		info = "noirq type ";
631 		callback = pm_noirq_op(dev->type->pm, state);
632 	} else if (dev->class && dev->class->pm) {
633 		info = "noirq class ";
634 		callback = pm_noirq_op(dev->class->pm, state);
635 	} else if (dev->bus && dev->bus->pm) {
636 		info = "noirq bus ";
637 		callback = pm_noirq_op(dev->bus->pm, state);
638 	}
639 	if (callback)
640 		goto Run;
641 
642 	if (skip_resume)
643 		goto Skip;
644 
645 	if (dev->driver && dev->driver->pm) {
646 		info = "noirq driver ";
647 		callback = pm_noirq_op(dev->driver->pm, state);
648 	}
649 
650 Run:
651 	error = dpm_run_callback(callback, dev, state, info);
652 
653 Skip:
654 	dev->power.is_noirq_suspended = false;
655 
656 Out:
657 	complete_all(&dev->power.completion);
658 	TRACE_RESUME(error);
659 
660 	if (error) {
661 		suspend_stats.failed_resume_noirq++;
662 		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
663 		dpm_save_failed_dev(dev_name(dev));
664 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
665 	}
666 }
667 
is_async(struct device * dev)668 static bool is_async(struct device *dev)
669 {
670 	return dev->power.async_suspend && pm_async_enabled
671 		&& !pm_trace_is_enabled();
672 }
673 
/*
 * dpm_async_fn - Schedule @func to run asynchronously for @dev if possible.
 * @dev: Device to handle.
 * @func: Async function to schedule.
 *
 * Returns true if @func was scheduled (it then owns a device reference and
 * must put_device() when done), false if the caller must run synchronously.
 */
static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	/* Re-arm the completion regardless of whether we go async. */
	reinit_completion(&dev->power.completion);

	if (!is_async(dev))
		return false;

	/* The reference is dropped by @func (or below on scheduling failure). */
	get_device(dev);

	if (async_schedule_dev_nocall(func, dev))
		return true;

	put_device(dev);

	return false;
}
690 
/*
 * async_resume_noirq - Async-domain wrapper for __device_resume_noirq().
 * @data: The device (reference held by dpm_async_fn()).
 * @cookie: Unused async cookie.
 */
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	__device_resume_noirq(dev, pm_transition, true);
	/* Drop the reference taken in dpm_async_fn(). */
	put_device(dev);
}
698 
/*
 * device_resume_noirq - Run the "noirq resume" for @dev, async if possible,
 * otherwise synchronously in the caller's context.
 */
static void device_resume_noirq(struct device *dev)
{
	if (dpm_async_fn(dev, async_resume_noirq))
		return;

	__device_resume_noirq(dev, pm_transition, false);
}
706 
/*
 * dpm_noirq_resume_devices - Run "noirq resume" callbacks for all devices on
 * dpm_noirq_list, moving each to dpm_late_early_list for the next phase.
 * @state: PM transition of the system being carried out.
 */
static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		/* Hold a reference while the list lock is dropped below. */
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);

		/* Drop the lock: the callback may take device locks. */
		mutex_unlock(&dpm_list_mtx);

		device_resume_noirq(dev);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all async resumes scheduled above to finish. */
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
734 
735 /**
736  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
737  * @state: PM transition of the system being carried out.
738  *
739  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
740  * allow device drivers' interrupt handlers to be called.
741  */
dpm_resume_noirq(pm_message_t state)742 void dpm_resume_noirq(pm_message_t state)
743 {
744 	dpm_noirq_resume_devices(state);
745 
746 	resume_device_irqs();
747 	device_wakeup_disarm_wake_irqs();
748 }
749 
750 /**
751  * __device_resume_early - Execute an "early resume" callback for given device.
752  * @dev: Device to handle.
753  * @state: PM transition of the system being carried out.
754  * @async: If true, the device is being resumed asynchronously.
755  *
756  * Runtime PM is disabled for @dev while this function is being executed.
757  */
__device_resume_early(struct device * dev,pm_message_t state,bool async)758 static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
759 {
760 	pm_callback_t callback = NULL;
761 	const char *info = NULL;
762 	int error = 0;
763 
764 	TRACE_DEVICE(dev);
765 	TRACE_RESUME(0);
766 
767 	if (dev->power.syscore || dev->power.direct_complete)
768 		goto Out;
769 
770 	if (!dev->power.is_late_suspended)
771 		goto Out;
772 
773 	if (!dpm_wait_for_superior(dev, async))
774 		goto Out;
775 
776 	if (dev->pm_domain) {
777 		info = "early power domain ";
778 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
779 	} else if (dev->type && dev->type->pm) {
780 		info = "early type ";
781 		callback = pm_late_early_op(dev->type->pm, state);
782 	} else if (dev->class && dev->class->pm) {
783 		info = "early class ";
784 		callback = pm_late_early_op(dev->class->pm, state);
785 	} else if (dev->bus && dev->bus->pm) {
786 		info = "early bus ";
787 		callback = pm_late_early_op(dev->bus->pm, state);
788 	}
789 	if (callback)
790 		goto Run;
791 
792 	if (dev_pm_skip_resume(dev))
793 		goto Skip;
794 
795 	if (dev->driver && dev->driver->pm) {
796 		info = "early driver ";
797 		callback = pm_late_early_op(dev->driver->pm, state);
798 	}
799 
800 Run:
801 	error = dpm_run_callback(callback, dev, state, info);
802 
803 Skip:
804 	dev->power.is_late_suspended = false;
805 
806 Out:
807 	TRACE_RESUME(error);
808 
809 	pm_runtime_enable(dev);
810 	complete_all(&dev->power.completion);
811 
812 	if (error) {
813 		suspend_stats.failed_resume_early++;
814 		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
815 		dpm_save_failed_dev(dev_name(dev));
816 		pm_dev_err(dev, state, async ? " async early" : " early", error);
817 	}
818 }
819 
/*
 * async_resume_early - Async-domain wrapper for __device_resume_early().
 * @data: The device (reference held by dpm_async_fn()).
 * @cookie: Unused async cookie.
 */
static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	__device_resume_early(dev, pm_transition, true);
	/* Drop the reference taken in dpm_async_fn(). */
	put_device(dev);
}
827 
/*
 * device_resume_early - Run the "early resume" for @dev, async if possible,
 * otherwise synchronously in the caller's context.
 */
static void device_resume_early(struct device *dev)
{
	if (dpm_async_fn(dev, async_resume_early))
		return;

	__device_resume_early(dev, pm_transition, false);
}
835 
836 /**
837  * dpm_resume_early - Execute "early resume" callbacks for all devices.
838  * @state: PM transition of the system being carried out.
839  */
dpm_resume_early(pm_message_t state)840 void dpm_resume_early(pm_message_t state)
841 {
842 	struct device *dev;
843 	ktime_t starttime = ktime_get();
844 
845 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
846 	mutex_lock(&dpm_list_mtx);
847 	pm_transition = state;
848 
849 	while (!list_empty(&dpm_late_early_list)) {
850 		dev = to_device(dpm_late_early_list.next);
851 		get_device(dev);
852 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
853 
854 		mutex_unlock(&dpm_list_mtx);
855 
856 		device_resume_early(dev);
857 
858 		put_device(dev);
859 
860 		mutex_lock(&dpm_list_mtx);
861 	}
862 	mutex_unlock(&dpm_list_mtx);
863 	async_synchronize_full();
864 	dpm_show_time(starttime, state, 0, "early");
865 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
866 }
867 
868 /**
869  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
870  * @state: PM transition of the system being carried out.
871  */
dpm_resume_start(pm_message_t state)872 void dpm_resume_start(pm_message_t state)
873 {
874 	dpm_resume_noirq(state);
875 	dpm_resume_early(state);
876 }
877 EXPORT_SYMBOL_GPL(dpm_resume_start);
878 
879 /**
880  * __device_resume - Execute "resume" callbacks for given device.
881  * @dev: Device to handle.
882  * @state: PM transition of the system being carried out.
883  * @async: If true, the device is being resumed asynchronously.
884  */
__device_resume(struct device * dev,pm_message_t state,bool async)885 static void __device_resume(struct device *dev, pm_message_t state, bool async)
886 {
887 	pm_callback_t callback = NULL;
888 	const char *info = NULL;
889 	int error = 0;
890 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
891 
892 	TRACE_DEVICE(dev);
893 	TRACE_RESUME(0);
894 
895 	if (dev->power.syscore)
896 		goto Complete;
897 
898 	if (dev->power.direct_complete) {
899 		/* Match the pm_runtime_disable() in __device_suspend(). */
900 		pm_runtime_enable(dev);
901 		goto Complete;
902 	}
903 
904 	if (!dpm_wait_for_superior(dev, async))
905 		goto Complete;
906 
907 	dpm_watchdog_set(&wd, dev);
908 	device_lock(dev);
909 
910 	/*
911 	 * This is a fib.  But we'll allow new children to be added below
912 	 * a resumed device, even if the device hasn't been completed yet.
913 	 */
914 	dev->power.is_prepared = false;
915 
916 	if (!dev->power.is_suspended)
917 		goto Unlock;
918 
919 	if (dev->pm_domain) {
920 		info = "power domain ";
921 		callback = pm_op(&dev->pm_domain->ops, state);
922 		goto Driver;
923 	}
924 
925 	if (dev->type && dev->type->pm) {
926 		info = "type ";
927 		callback = pm_op(dev->type->pm, state);
928 		goto Driver;
929 	}
930 
931 	if (dev->class && dev->class->pm) {
932 		info = "class ";
933 		callback = pm_op(dev->class->pm, state);
934 		goto Driver;
935 	}
936 
937 	if (dev->bus) {
938 		if (dev->bus->pm) {
939 			info = "bus ";
940 			callback = pm_op(dev->bus->pm, state);
941 		} else if (dev->bus->resume) {
942 			info = "legacy bus ";
943 			callback = dev->bus->resume;
944 			goto End;
945 		}
946 	}
947 
948  Driver:
949 	if (!callback && dev->driver && dev->driver->pm) {
950 		info = "driver ";
951 		callback = pm_op(dev->driver->pm, state);
952 	}
953 
954  End:
955 	error = dpm_run_callback(callback, dev, state, info);
956 	dev->power.is_suspended = false;
957 
958  Unlock:
959 	device_unlock(dev);
960 	dpm_watchdog_clear(&wd);
961 
962  Complete:
963 	complete_all(&dev->power.completion);
964 
965 	TRACE_RESUME(error);
966 
967 	if (error) {
968 		suspend_stats.failed_resume++;
969 		dpm_save_failed_step(SUSPEND_RESUME);
970 		dpm_save_failed_dev(dev_name(dev));
971 		pm_dev_err(dev, state, async ? " async" : "", error);
972 	}
973 }
974 
/*
 * async_resume - Async-domain wrapper for __device_resume().
 * @data: The device (reference held by dpm_async_fn()).
 * @cookie: Unused async cookie.
 */
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	__device_resume(dev, pm_transition, true);
	/* Drop the reference taken in dpm_async_fn(). */
	put_device(dev);
}
982 
/*
 * device_resume - Run the full "resume" for @dev, async if possible,
 * otherwise synchronously in the caller's context.
 */
static void device_resume(struct device *dev)
{
	if (dpm_async_fn(dev, async_resume))
		return;

	__device_resume(dev, pm_transition, false);
}
990 
991 /**
992  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
993  * @state: PM transition of the system being carried out.
994  *
995  * Execute the appropriate "resume" callback for all devices whose status
996  * indicates that they are suspended.
997  */
dpm_resume(pm_message_t state)998 void dpm_resume(pm_message_t state)
999 {
1000 	struct device *dev;
1001 	ktime_t starttime = ktime_get();
1002 
1003 	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1004 	might_sleep();
1005 
1006 	mutex_lock(&dpm_list_mtx);
1007 	pm_transition = state;
1008 	async_error = 0;
1009 
1010 	while (!list_empty(&dpm_suspended_list)) {
1011 		dev = to_device(dpm_suspended_list.next);
1012 
1013 		get_device(dev);
1014 
1015 		mutex_unlock(&dpm_list_mtx);
1016 
1017 		device_resume(dev);
1018 
1019 		mutex_lock(&dpm_list_mtx);
1020 
1021 		if (!list_empty(&dev->power.entry))
1022 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1023 
1024 		mutex_unlock(&dpm_list_mtx);
1025 
1026 		put_device(dev);
1027 
1028 		mutex_lock(&dpm_list_mtx);
1029 	}
1030 	mutex_unlock(&dpm_list_mtx);
1031 	async_synchronize_full();
1032 	dpm_show_time(starttime, state, 0, NULL);
1033 
1034 	cpufreq_resume();
1035 	devfreq_resume();
1036 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1037 }
1038 
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	/* Syscore devices are handled outside the regular device PM phases. */
	if (dev->power.syscore)
		goto out;

	device_lock(dev);

	/*
	 * Callback precedence: PM domain, then type, then class, then bus.
	 * The driver's ->complete() is used only as a fallback when none of
	 * the above supplied a callback.
	 */
	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

out:
	/*
	 * Balance the pm_runtime_get_noresume() done in device_prepare(),
	 * re-allowing runtime suspend now that the transition is over.
	 */
	pm_runtime_put(dev);
}
1083 
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	/* Walk dpm_prepared_list from the tail, i.e. reverse prepare order. */
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		/* Park the device on a local list while the mutex is dropped. */
		list_move(&dev->power.entry, &list);

		/* device_complete() may sleep; don't hold dpm_list_mtx over it. */
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	/* Return all processed devices to the main dpm_list. */
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
1124 
1125 /**
1126  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1127  * @state: PM transition of the system being carried out.
1128  *
1129  * Execute "resume" callbacks for all devices and complete the PM transition of
1130  * the system.
1131  */
dpm_resume_end(pm_message_t state)1132 void dpm_resume_end(pm_message_t state)
1133 {
1134 	dpm_resume(state);
1135 	dpm_complete(state);
1136 }
1137 EXPORT_SYMBOL_GPL(dpm_resume_end);
1138 
1139 
1140 /*------------------------- Suspend routines -------------------------*/
1141 
1142 /**
1143  * resume_event - Return a "resume" message for given "suspend" sleep state.
1144  * @sleep_state: PM message representing a sleep state.
1145  *
1146  * Return a PM message representing the resume event corresponding to given
1147  * sleep state.
1148  */
resume_event(pm_message_t sleep_state)1149 static pm_message_t resume_event(pm_message_t sleep_state)
1150 {
1151 	switch (sleep_state.event) {
1152 	case PM_EVENT_SUSPEND:
1153 		return PMSG_RESUME;
1154 	case PM_EVENT_FREEZE:
1155 	case PM_EVENT_QUIESCE:
1156 		return PMSG_RECOVER;
1157 	case PM_EVENT_HIBERNATE:
1158 		return PMSG_RESTORE;
1159 	}
1160 	return PMSG_ON;
1161 }
1162 
dpm_superior_set_must_resume(struct device * dev)1163 static void dpm_superior_set_must_resume(struct device *dev)
1164 {
1165 	struct device_link *link;
1166 	int idx;
1167 
1168 	if (dev->parent)
1169 		dev->parent->power.must_resume = true;
1170 
1171 	idx = device_links_read_lock();
1172 
1173 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1174 		link->supplier->power.must_resume = true;
1175 
1176 	device_links_read_unlock(idx);
1177 }
1178 
/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	/* Subordinate devices must be handled before this one. */
	dpm_wait_for_subordinate(dev, async);

	/* If another device already failed, abort without running callbacks. */
	if (async_error)
		goto Complete;

	/* Syscore and direct-complete devices skip this phase entirely. */
	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	/*
	 * Callback precedence: PM domain, then type, then class, then bus.
	 * The driver callback is used only when none of those supplied one.
	 */
	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		/* Record the failure so the whole noirq phase gets aborted. */
		async_error = error;
		log_suspend_abort_reason("Device %s failed to %s noirq: error %d",
					 dev_name(dev), pm_verb(state.event), error);
		goto Complete;
	}

Skip:
	dev->power.is_noirq_suspended = true;

	/*
	 * Skipping the resume of devices that were in use right before the
	 * system suspend (as indicated by their PM-runtime usage counters)
	 * would be suboptimal.  Also resume them if doing that is not allowed
	 * to be skipped.
	 */
	if (atomic_read(&dev->power.usage_count) > 1 ||
	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
	      dev->power.may_skip_resume))
		dev->power.must_resume = true;

	/* Propagate must_resume to the parent and suppliers. */
	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

Complete:
	/* Wake up anybody waiting on this device in dpm_wait_for_subordinate(). */
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
1260 
/* Async counterpart of device_suspend_noirq(); reports failures itself. */
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int err = __device_suspend_noirq(dev, pm_transition, true);

	if (err) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", err);
	}

	put_device(dev);
}
1274 
device_suspend_noirq(struct device * dev)1275 static int device_suspend_noirq(struct device *dev)
1276 {
1277 	if (dpm_async_fn(dev, async_suspend_noirq))
1278 		return 0;
1279 
1280 	return __device_suspend_noirq(dev, pm_transition, false);
1281 }
1282 
/*
 * Run the "noirq suspend" phase for every device on dpm_late_early_list,
 * moving successfully handled devices to dpm_noirq_list.  Stops on the
 * first (sync or async) error and returns it.
 */
static int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/* Walk from the tail: suspend in reverse order of list insertion. */
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		/* Pin the device; the list lock is dropped around the callback. */
		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);

		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
		} else if (!list_empty(&dev->power.entry)) {
			list_move(&dev->power.entry, &dpm_noirq_list);
		}

		/* put_device() may drop the last reference; call it unlocked. */
		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for in-flight async suspends before reading async_error. */
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}
1332 
1333 /**
1334  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1335  * @state: PM transition of the system being carried out.
1336  *
1337  * Prevent device drivers' interrupt handlers from being called and invoke
1338  * "noirq" suspend callbacks for all non-sysdev devices.
1339  */
dpm_suspend_noirq(pm_message_t state)1340 int dpm_suspend_noirq(pm_message_t state)
1341 {
1342 	int ret;
1343 
1344 	device_wakeup_arm_wake_irqs();
1345 	suspend_device_irqs();
1346 
1347 	ret = dpm_noirq_suspend_devices(state);
1348 	if (ret)
1349 		dpm_resume_noirq(resume_event(state));
1350 
1351 	return ret;
1352 }
1353 
dpm_propagate_wakeup_to_parent(struct device * dev)1354 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1355 {
1356 	struct device *parent = dev->parent;
1357 
1358 	if (!parent)
1359 		return;
1360 
1361 	spin_lock_irq(&parent->power.lock);
1362 
1363 	if (device_wakeup_path(dev) && !parent->power.ignore_children)
1364 		parent->power.wakeup_path = true;
1365 
1366 	spin_unlock_irq(&parent->power.lock);
1367 }
1368 
/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	/* Keep runtime PM disabled from here until the "early resume" phase. */
	__pm_runtime_disable(dev, false);

	/* Subordinate devices must be handled before this one. */
	dpm_wait_for_subordinate(dev, async);

	/* If another device already failed, abort without running callbacks. */
	if (async_error)
		goto Complete;

	/* A pending wakeup event aborts the whole suspend. */
	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	/* Syscore and direct-complete devices skip this phase entirely. */
	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	/*
	 * Callback precedence: PM domain, then type, then class, then bus.
	 * The driver callback is used only when none of those supplied one.
	 */
	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		/* Record the failure so the whole late-suspend phase gets aborted. */
		async_error = error;
		log_suspend_abort_reason("Device %s failed to %s late: error %d",
					 dev_name(dev), pm_verb(state.event), error);
		goto Complete;
	}
	dpm_propagate_wakeup_to_parent(dev);

Skip:
	dev->power.is_late_suspended = true;

Complete:
	TRACE_SUSPEND(error);
	/* Wake up anybody waiting on this device in dpm_wait_for_subordinate(). */
	complete_all(&dev->power.completion);
	return error;
}
1443 
/* Async counterpart of device_suspend_late(); reports failures itself. */
static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int err = __device_suspend_late(dev, pm_transition, true);

	if (err) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", err);
	}

	put_device(dev);
}
1456 
device_suspend_late(struct device * dev)1457 static int device_suspend_late(struct device *dev)
1458 {
1459 	if (dpm_async_fn(dev, async_suspend_late))
1460 		return 0;
1461 
1462 	return __device_suspend_late(dev, pm_transition, false);
1463 }
1464 
/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	wake_up_all_idle_cpus();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/* Walk dpm_suspended_list from the tail: reverse of suspend order. */
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		/* Pin the device; the list lock is dropped around the callback. */
		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);

		/* Move the device on even if it failed; rollback resumes this list. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
		}

		/* put_device() may drop the last reference; call it unlocked. */
		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for in-flight async suspends before reading async_error. */
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		/* Roll back the devices already late-suspended. */
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}
1521 
1522 /**
1523  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1524  * @state: PM transition of the system being carried out.
1525  */
dpm_suspend_end(pm_message_t state)1526 int dpm_suspend_end(pm_message_t state)
1527 {
1528 	ktime_t starttime = ktime_get();
1529 	int error;
1530 
1531 	error = dpm_suspend_late(state);
1532 	if (error)
1533 		goto out;
1534 
1535 	error = dpm_suspend_noirq(state);
1536 	if (error)
1537 		dpm_resume_early(resume_event(state));
1538 
1539 out:
1540 	dpm_show_time(starttime, state, error, "end");
1541 	return error;
1542 }
1543 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1544 
1545 /**
1546  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1547  * @dev: Device to suspend.
1548  * @state: PM transition of the system being carried out.
1549  * @cb: Suspend callback to execute.
1550  * @info: string description of caller.
1551  */
legacy_suspend(struct device * dev,pm_message_t state,int (* cb)(struct device * dev,pm_message_t state),const char * info)1552 static int legacy_suspend(struct device *dev, pm_message_t state,
1553 			  int (*cb)(struct device *dev, pm_message_t state),
1554 			  const char *info)
1555 {
1556 	int error;
1557 	ktime_t calltime;
1558 
1559 	calltime = initcall_debug_start(dev, cb);
1560 
1561 	trace_device_pm_callback_start(dev, info, state.event);
1562 	error = cb(dev, state);
1563 	trace_device_pm_callback_end(dev, error);
1564 	suspend_report_result(dev, cb, error);
1565 
1566 	initcall_debug_report(dev, calltime, cb, error);
1567 
1568 	return error;
1569 }
1570 
dpm_clear_superiors_direct_complete(struct device * dev)1571 static void dpm_clear_superiors_direct_complete(struct device *dev)
1572 {
1573 	struct device_link *link;
1574 	int idx;
1575 
1576 	if (dev->parent) {
1577 		spin_lock_irq(&dev->parent->power.lock);
1578 		dev->parent->power.direct_complete = false;
1579 		spin_unlock_irq(&dev->parent->power.lock);
1580 	}
1581 
1582 	idx = device_links_read_lock();
1583 
1584 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1585 		spin_lock_irq(&link->supplier->power.lock);
1586 		link->supplier->power.direct_complete = false;
1587 		spin_unlock_irq(&link->supplier->power.lock);
1588 	}
1589 
1590 	device_links_read_unlock(idx);
1591 }
1592 
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	/* Subordinate devices must be suspended before this one. */
	dpm_wait_for_subordinate(dev, async);

	if (async_error) {
		/* Another device already failed; don't leave this one direct-complete. */
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * Wait for possible runtime PM transitions of the device in progress
	 * to complete and if there's a runtime resume request pending for it,
	 * resume it before proceeding with invoking the system-wide suspend
	 * callbacks for it.
	 *
	 * If the system-wide suspend callbacks below change the configuration
	 * of the device, they must disable runtime PM for it or otherwise
	 * ensure that its runtime-resume callbacks will not be confused by that
	 * change in case they are invoked going forward.
	 */
	pm_runtime_barrier(dev);

	if (pm_wakeup_pending()) {
		/* A pending wakeup event aborts the whole suspend. */
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

	/* Syscore devices are handled outside the regular PM phases. */
	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || device_wakeup_path(dev))
		dev->power.direct_complete = false;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			/*
			 * Re-check after disabling runtime PM: the device may
			 * have been resumed before the disable took effect.
			 */
			if (pm_runtime_status_suspended(dev)) {
				/* Leave the device runtime-suspended; skip callbacks. */
				pm_dev_dbg(dev, state, "direct-complete ");
				goto Complete;
			}

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dev->power.may_skip_resume = true;
	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * Callback precedence: PM domain, then type, then class, then bus;
	 * the driver callback is a fallback only (see the Run label below).
	 */
	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			/* Legacy (pre-dev_pm_ops) bus ->suspend() path. */
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (device_may_wakeup(dev))
			dev->power.wakeup_path = true;

		dpm_propagate_wakeup_to_parent(dev);
		dpm_clear_superiors_direct_complete(dev);
	} else {
		log_suspend_abort_reason("Device %s failed to %s: error %d",
					 dev_name(dev), pm_verb(state.event), error);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	if (error)
		async_error = error;

	/* Wake up anybody waiting on this device in dpm_wait_for_subordinate(). */
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
1723 
/* Async counterpart of device_suspend(); reports failures itself. */
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int err = __device_suspend(dev, pm_transition, true);

	if (err) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", err);
	}

	put_device(dev);
}
1737 
device_suspend(struct device * dev)1738 static int device_suspend(struct device *dev)
1739 {
1740 	if (dpm_async_fn(dev, async_suspend))
1741 		return 0;
1742 
1743 	return __device_suspend(dev, pm_transition, false);
1744 }
1745 
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	devfreq_suspend();
	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	/* Walk dpm_prepared_list from the tail: reverse of prepare order. */
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		/* Pin the device; the list lock is dropped around the callback. */
		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);

		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
		} else if (!list_empty(&dev->power.entry)) {
			list_move(&dev->power.entry, &dpm_suspended_list);
		}

		/* put_device() may drop the last reference; call it unlocked. */
		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for in-flight async suspends before reading async_error. */
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}
1803 
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	/* Syscore devices are handled outside the regular PM phases. */
	if (dev->power.syscore)
		return 0;

	device_lock(dev);

	dev->power.wakeup_path = false;

	if (dev->power.no_pm_callbacks)
		goto unlock;

	/* Callback precedence: PM domain, type, class, bus; driver as fallback. */
	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(dev, callback, ret);
		/* Drop the usage count taken above: prepare failed. */
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		(ret > 0 || dev->power.no_pm_callbacks) &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
1872 
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give a chance for the known devices to complete their probes, before
	 * disable probing of devices. This sync point is important at least
	 * at boot time + hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * It is unsafe if probing of devices will happen during suspend or
	 * hibernation and system behavior will be unpredictable in this case.
	 * So, let's prohibit device's probing here and defer their probes
	 * instead. The normal behavior will be restored in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	/* Walk dpm_list from the head; stop at the first hard failure. */
	while (!list_empty(&dpm_list) && !error) {
		struct device *dev = to_device(dpm_list.next);

		/* Pin the device; the list lock is dropped around the callback. */
		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);

		if (!error) {
			dev->power.is_prepared = true;
			if (!list_empty(&dev->power.entry))
				list_move_tail(&dev->power.entry, &dpm_prepared_list);
		} else if (error == -EAGAIN) {
			/* -EAGAIN means "skip this device but keep going". */
			error = 0;
		} else {
			dev_info(dev, "not prepared for power transition: code %d\n",
				 error);
			log_suspend_abort_reason("Device %s not prepared for power transition: code %d",
						 dev_name(dev), error);
			dpm_save_failed_dev(dev_name(dev));
		}

		/* put_device() may drop the last reference; call it unlocked. */
		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}
1938 
1939 /**
1940  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1941  * @state: PM transition of the system being carried out.
1942  *
1943  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1944  * callbacks for them.
1945  */
dpm_suspend_start(pm_message_t state)1946 int dpm_suspend_start(pm_message_t state)
1947 {
1948 	ktime_t starttime = ktime_get();
1949 	int error;
1950 
1951 	error = dpm_prepare(state);
1952 	if (error) {
1953 		suspend_stats.failed_prepare++;
1954 		dpm_save_failed_step(SUSPEND_PREPARE);
1955 	} else
1956 		error = dpm_suspend(state);
1957 	dpm_show_time(starttime, state, error, "start");
1958 	return error;
1959 }
1960 EXPORT_SYMBOL_GPL(dpm_suspend_start);
1961 
/* Log a failing suspend callback; silent when @ret is zero. */
void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
	if (!ret)
		return;

	dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
1968 
1969 /**
1970  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1971  * @subordinate: Device that needs to wait for @dev.
1972  * @dev: Device to wait for.
1973  */
device_pm_wait_for_dev(struct device * subordinate,struct device * dev)1974 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1975 {
1976 	dpm_wait(dev, subordinate->power.async_suspend);
1977 	return async_error;
1978 }
1979 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1980 
1981 /**
1982  * dpm_for_each_dev - device iterator.
1983  * @data: data for the callback.
1984  * @fn: function to be called for each device.
1985  *
1986  * Iterate over devices in dpm_list, and call @fn for each device,
1987  * passing it @data.
1988  */
dpm_for_each_dev(void * data,void (* fn)(struct device *,void *))1989 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1990 {
1991 	struct device *dev;
1992 
1993 	if (!fn)
1994 		return;
1995 
1996 	device_pm_lock();
1997 	list_for_each_entry(dev, &dpm_list, power.entry)
1998 		fn(dev, data);
1999 	device_pm_unlock();
2000 }
2001 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
2002 
pm_ops_is_empty(const struct dev_pm_ops * ops)2003 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2004 {
2005 	if (!ops)
2006 		return true;
2007 
2008 	return !ops->prepare &&
2009 	       !ops->suspend &&
2010 	       !ops->suspend_late &&
2011 	       !ops->suspend_noirq &&
2012 	       !ops->resume_noirq &&
2013 	       !ops->resume_early &&
2014 	       !ops->resume &&
2015 	       !ops->complete;
2016 }
2017 
device_pm_check_callbacks(struct device * dev)2018 void device_pm_check_callbacks(struct device *dev)
2019 {
2020 	unsigned long flags;
2021 
2022 	spin_lock_irqsave(&dev->power.lock, flags);
2023 	dev->power.no_pm_callbacks =
2024 		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2025 		 !dev->bus->suspend && !dev->bus->resume)) &&
2026 		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2027 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2028 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2029 		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2030 		 !dev->driver->suspend && !dev->driver->resume));
2031 	spin_unlock_irqrestore(&dev->power.lock, flags);
2032 }
2033 
dev_pm_skip_suspend(struct device * dev)2034 bool dev_pm_skip_suspend(struct device *dev)
2035 {
2036 	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2037 		pm_runtime_status_suspended(dev);
2038 }
2039