1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17 
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20 
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37 #include <linux/wakeup_reason.h>
38 
39 #include <trace/hooks/dtask.h>
40 
41 #include "../base.h"
42 #include "power.h"
43 
44 typedef int (*pm_callback_t)(struct device *);
45 
46 #define list_for_each_entry_rcu_locked(pos, head, member) \
47 	list_for_each_entry_rcu(pos, head, member, \
48 			device_links_read_lock_held())
49 
50 /*
51  * The entries in dpm_list are kept in depth-first order, simply
52  * because children are guaranteed to be discovered after their parents, and
53  * are inserted at the back of the list on discovery.
54  *
55  * Since device_pm_add() may be called with a device lock held,
56  * we must never try to acquire a device lock while holding
57  * dpm_list_mutex.
58  */
59 
60 LIST_HEAD(dpm_list);
61 static LIST_HEAD(dpm_prepared_list);
62 static LIST_HEAD(dpm_suspended_list);
63 static LIST_HEAD(dpm_late_early_list);
64 static LIST_HEAD(dpm_noirq_list);
65 
66 struct suspend_stats suspend_stats;
67 static DEFINE_MUTEX(dpm_list_mtx);
68 static pm_message_t pm_transition;
69 
70 static int async_error;
71 
72 static const char *pm_verb(int event)
73 {
74 	switch (event) {
75 	case PM_EVENT_SUSPEND:
76 		return "suspend";
77 	case PM_EVENT_RESUME:
78 		return "resume";
79 	case PM_EVENT_FREEZE:
80 		return "freeze";
81 	case PM_EVENT_QUIESCE:
82 		return "quiesce";
83 	case PM_EVENT_HIBERNATE:
84 		return "hibernate";
85 	case PM_EVENT_THAW:
86 		return "thaw";
87 	case PM_EVENT_RESTORE:
88 		return "restore";
89 	case PM_EVENT_RECOVER:
90 		return "recover";
91 	default:
92 		return "(unknown PM event)";
93 	}
94 }
95 
96 /**
97  * device_pm_sleep_init - Initialize system suspend-related device fields.
98  * @dev: Device object being initialized.
99  */
100 void device_pm_sleep_init(struct device *dev)
101 {
102 	dev->power.is_prepared = false;
103 	dev->power.is_suspended = false;
104 	dev->power.is_noirq_suspended = false;
105 	dev->power.is_late_suspended = false;
106 	init_completion(&dev->power.completion);
107 	complete_all(&dev->power.completion);
108 	dev->power.wakeup = NULL;
109 	INIT_LIST_HEAD(&dev->power.entry);
110 }
111 
112 /**
113  * device_pm_lock - Lock the list of active devices used by the PM core.
114  */
115 void device_pm_lock(void)
116 {
117 	mutex_lock(&dpm_list_mtx);
118 }
119 
120 /**
121  * device_pm_unlock - Unlock the list of active devices used by the PM core.
122  */
123 void device_pm_unlock(void)
124 {
125 	mutex_unlock(&dpm_list_mtx);
126 }
127 
128 /**
129  * device_pm_add - Add a device to the PM core's list of active devices.
130  * @dev: Device to add to the list.
131  */
132 void device_pm_add(struct device *dev)
133 {
134 	/* Skip PM setup/initialization. */
135 	if (device_pm_not_required(dev))
136 		return;
137 
138 	pr_debug("Adding info for %s:%s\n",
139 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
140 	device_pm_check_callbacks(dev);
141 	mutex_lock(&dpm_list_mtx);
142 	if (dev->parent && dev->parent->power.is_prepared)
143 		dev_warn(dev, "parent %s should not be sleeping\n",
144 			dev_name(dev->parent));
145 	list_add_tail(&dev->power.entry, &dpm_list);
146 	dev->power.in_dpm_list = true;
147 	mutex_unlock(&dpm_list_mtx);
148 }
149 
150 /**
151  * device_pm_remove - Remove a device from the PM core's list of active devices.
152  * @dev: Device to be removed from the list.
153  */
154 void device_pm_remove(struct device *dev)
155 {
156 	if (device_pm_not_required(dev))
157 		return;
158 
159 	pr_debug("Removing info for %s:%s\n",
160 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
161 	complete_all(&dev->power.completion);
162 	mutex_lock(&dpm_list_mtx);
163 	list_del_init(&dev->power.entry);
164 	dev->power.in_dpm_list = false;
165 	mutex_unlock(&dpm_list_mtx);
166 	device_wakeup_disable(dev);
167 	pm_runtime_remove(dev);
168 	device_pm_check_callbacks(dev);
169 }
170 
171 /**
172  * device_pm_move_before - Move device in the PM core's list of active devices.
173  * @deva: Device to move in dpm_list.
174  * @devb: Device @deva should come before.
175  */
176 void device_pm_move_before(struct device *deva, struct device *devb)
177 {
178 	pr_debug("Moving %s:%s before %s:%s\n",
179 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
180 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
181 	/* Delete deva from dpm_list and reinsert before devb. */
182 	list_move_tail(&deva->power.entry, &devb->power.entry);
183 }
184 
185 /**
186  * device_pm_move_after - Move device in the PM core's list of active devices.
187  * @deva: Device to move in dpm_list.
188  * @devb: Device @deva should come after.
189  */
190 void device_pm_move_after(struct device *deva, struct device *devb)
191 {
192 	pr_debug("Moving %s:%s after %s:%s\n",
193 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
194 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
195 	/* Delete deva from dpm_list and reinsert after devb. */
196 	list_move(&deva->power.entry, &devb->power.entry);
197 }
198 
199 /**
200  * device_pm_move_last - Move device to end of the PM core's list of devices.
201  * @dev: Device to move in dpm_list.
202  */
203 void device_pm_move_last(struct device *dev)
204 {
205 	pr_debug("Moving %s:%s to end of list\n",
206 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
207 	list_move_tail(&dev->power.entry, &dpm_list);
208 }
209 
210 static ktime_t initcall_debug_start(struct device *dev, void *cb)
211 {
212 	if (!pm_print_times_enabled)
213 		return 0;
214 
215 	dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
216 		 task_pid_nr(current),
217 		 dev->parent ? dev_name(dev->parent) : "none");
218 	return ktime_get();
219 }
220 
221 static void initcall_debug_report(struct device *dev, ktime_t calltime,
222 				  void *cb, int error)
223 {
224 	ktime_t rettime;
225 
226 	if (!pm_print_times_enabled)
227 		return;
228 
229 	rettime = ktime_get();
230 	dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
231 		 (unsigned long long)ktime_us_delta(rettime, calltime));
232 }
233 
234 /**
235  * dpm_wait - Wait for a PM operation to complete.
236  * @dev: Device to wait for.
237  * @async: If unset, wait only if the device's power.async_suspend flag is set.
238  */
239 static void dpm_wait(struct device *dev, bool async)
240 {
241 	if (!dev)
242 		return;
243 
244 	if (async || (pm_async_enabled && dev->power.async_suspend)) {
245 		trace_android_vh_dpm_wait_start(dev);
246 		wait_for_completion(&dev->power.completion);
247 		trace_android_vh_dpm_wait_finish(dev);
248 	}
249 }
250 
251 static int dpm_wait_fn(struct device *dev, void *async_ptr)
252 {
253 	dpm_wait(dev, *((bool *)async_ptr));
254 	return 0;
255 }
256 
257 static void dpm_wait_for_children(struct device *dev, bool async)
258 {
259 	device_for_each_child(dev, &async, dpm_wait_fn);
260 }
261 
262 static void dpm_wait_for_suppliers(struct device *dev, bool async)
263 {
264 	struct device_link *link;
265 	int idx;
266 
267 	idx = device_links_read_lock();
268 
269 	/*
270 	 * If the supplier goes away right after we've checked the link to it,
271 	 * we'll wait for its completion to change the state, but that's fine,
272 	 * because the only things that will block as a result are the SRCU
273 	 * callbacks freeing the link objects for the links in the list we're
274 	 * walking.
275 	 */
276 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
277 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
278 			dpm_wait(link->supplier, async);
279 
280 	device_links_read_unlock(idx);
281 }
282 
283 static bool dpm_wait_for_superior(struct device *dev, bool async)
284 {
285 	struct device *parent;
286 
287 	/*
288 	 * If the device is resumed asynchronously and the parent's callback
289 	 * deletes both the device and the parent itself, the parent object may
290 	 * be freed while this function is running, so avoid that by reference
291 	 * counting the parent once more unless the device has been deleted
292 	 * already (in which case return right away).
293 	 */
294 	mutex_lock(&dpm_list_mtx);
295 
296 	if (!device_pm_initialized(dev)) {
297 		mutex_unlock(&dpm_list_mtx);
298 		return false;
299 	}
300 
301 	parent = get_device(dev->parent);
302 
303 	mutex_unlock(&dpm_list_mtx);
304 
305 	dpm_wait(parent, async);
306 	put_device(parent);
307 
308 	dpm_wait_for_suppliers(dev, async);
309 
310 	/*
311 	 * If the parent's callback has deleted the device, attempting to resume
312 	 * it would be invalid, so avoid doing that then.
313 	 */
314 	return device_pm_initialized(dev);
315 }
316 
317 static void dpm_wait_for_consumers(struct device *dev, bool async)
318 {
319 	struct device_link *link;
320 	int idx;
321 
322 	idx = device_links_read_lock();
323 
324 	/*
325 	 * The status of a device link can only be changed from "dormant" by a
326 	 * probe, but that cannot happen during system suspend/resume.  In
327 	 * theory it can change to "dormant" at that time, but then it is
328 	 * reasonable to wait for the target device anyway (e.g. if it goes
329 	 * away, it's better to wait for it to go away completely and then
330 	 * continue instead of trying to continue in parallel with its
331 	 * unregistration).
332 	 */
333 	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
334 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
335 			dpm_wait(link->consumer, async);
336 
337 	device_links_read_unlock(idx);
338 }
339 
340 static void dpm_wait_for_subordinate(struct device *dev, bool async)
341 {
342 	dpm_wait_for_children(dev, async);
343 	dpm_wait_for_consumers(dev, async);
344 }
345 
346 /**
347  * pm_op - Return the PM operation appropriate for given PM event.
348  * @ops: PM operations to choose from.
349  * @state: PM transition of the system being carried out.
350  */
351 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
352 {
353 	switch (state.event) {
354 #ifdef CONFIG_SUSPEND
355 	case PM_EVENT_SUSPEND:
356 		return ops->suspend;
357 	case PM_EVENT_RESUME:
358 		return ops->resume;
359 #endif /* CONFIG_SUSPEND */
360 #ifdef CONFIG_HIBERNATE_CALLBACKS
361 	case PM_EVENT_FREEZE:
362 	case PM_EVENT_QUIESCE:
363 		return ops->freeze;
364 	case PM_EVENT_HIBERNATE:
365 		return ops->poweroff;
366 	case PM_EVENT_THAW:
367 	case PM_EVENT_RECOVER:
368 		return ops->thaw;
369 	case PM_EVENT_RESTORE:
370 		return ops->restore;
371 #endif /* CONFIG_HIBERNATE_CALLBACKS */
372 	}
373 
374 	return NULL;
375 }
376 
377 /**
378  * pm_late_early_op - Return the PM operation appropriate for given PM event.
379  * @ops: PM operations to choose from.
380  * @state: PM transition of the system being carried out.
381  *
382  * Runtime PM is disabled for @dev while this function is being executed.
383  */
384 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
385 				      pm_message_t state)
386 {
387 	switch (state.event) {
388 #ifdef CONFIG_SUSPEND
389 	case PM_EVENT_SUSPEND:
390 		return ops->suspend_late;
391 	case PM_EVENT_RESUME:
392 		return ops->resume_early;
393 #endif /* CONFIG_SUSPEND */
394 #ifdef CONFIG_HIBERNATE_CALLBACKS
395 	case PM_EVENT_FREEZE:
396 	case PM_EVENT_QUIESCE:
397 		return ops->freeze_late;
398 	case PM_EVENT_HIBERNATE:
399 		return ops->poweroff_late;
400 	case PM_EVENT_THAW:
401 	case PM_EVENT_RECOVER:
402 		return ops->thaw_early;
403 	case PM_EVENT_RESTORE:
404 		return ops->restore_early;
405 #endif /* CONFIG_HIBERNATE_CALLBACKS */
406 	}
407 
408 	return NULL;
409 }
410 
411 /**
412  * pm_noirq_op - Return the PM operation appropriate for given PM event.
413  * @ops: PM operations to choose from.
414  * @state: PM transition of the system being carried out.
415  *
416  * The driver of @dev will not receive interrupts while this function is being
417  * executed.
418  */
419 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
420 {
421 	switch (state.event) {
422 #ifdef CONFIG_SUSPEND
423 	case PM_EVENT_SUSPEND:
424 		return ops->suspend_noirq;
425 	case PM_EVENT_RESUME:
426 		return ops->resume_noirq;
427 #endif /* CONFIG_SUSPEND */
428 #ifdef CONFIG_HIBERNATE_CALLBACKS
429 	case PM_EVENT_FREEZE:
430 	case PM_EVENT_QUIESCE:
431 		return ops->freeze_noirq;
432 	case PM_EVENT_HIBERNATE:
433 		return ops->poweroff_noirq;
434 	case PM_EVENT_THAW:
435 	case PM_EVENT_RECOVER:
436 		return ops->thaw_noirq;
437 	case PM_EVENT_RESTORE:
438 		return ops->restore_noirq;
439 #endif /* CONFIG_HIBERNATE_CALLBACKS */
440 	}
441 
442 	return NULL;
443 }
444 
445 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
446 {
447 	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
448 		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
449 		", may wakeup" : "", dev->power.driver_flags);
450 }
451 
452 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
453 			int error)
454 {
455 	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
456 		error);
457 }
458 
459 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
460 			  const char *info)
461 {
462 	ktime_t calltime;
463 	u64 usecs64;
464 	int usecs;
465 
466 	calltime = ktime_get();
467 	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
468 	do_div(usecs64, NSEC_PER_USEC);
469 	usecs = usecs64;
470 	if (usecs == 0)
471 		usecs = 1;
472 
473 	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
474 		  info ?: "", info ? " " : "", pm_verb(state.event),
475 		  error ? "aborted" : "complete",
476 		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
477 }
478 
479 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
480 			    pm_message_t state, const char *info)
481 {
482 	ktime_t calltime;
483 	int error;
484 
485 	if (!cb)
486 		return 0;
487 
488 	calltime = initcall_debug_start(dev, cb);
489 
490 	pm_dev_dbg(dev, state, info);
491 	trace_device_pm_callback_start(dev, info, state.event);
492 	error = cb(dev);
493 	trace_device_pm_callback_end(dev, error);
494 	suspend_report_result(dev, cb, error);
495 
496 	initcall_debug_report(dev, calltime, cb, error);
497 
498 	return error;
499 }
500 
501 #ifdef CONFIG_DPM_WATCHDOG
502 struct dpm_watchdog {
503 	struct device		*dev;
504 	struct task_struct	*tsk;
505 	struct timer_list	timer;
506 };
507 
508 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
509 	struct dpm_watchdog wd
510 
511 /**
512  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
513  * @t: The timer that PM watchdog depends on.
514  *
515  * Called when a driver has timed out suspending or resuming.
516  * There's not much we can do here to recover so panic() to
517  * capture a crash-dump in pstore.
518  */
519 static void dpm_watchdog_handler(struct timer_list *t)
520 {
521 	struct dpm_watchdog *wd = from_timer(wd, t, timer);
522 
523 	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
524 	show_stack(wd->tsk, NULL, KERN_EMERG);
525 	panic("%s %s: unrecoverable failure\n",
526 		dev_driver_string(wd->dev), dev_name(wd->dev));
527 }
528 
529 /**
530  * dpm_watchdog_set - Enable pm watchdog for given device.
531  * @wd: Watchdog. Must be allocated on the stack.
532  * @dev: Device to handle.
533  */
534 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
535 {
536 	struct timer_list *timer = &wd->timer;
537 
538 	wd->dev = dev;
539 	wd->tsk = current;
540 
541 	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
542 	/* use same timeout value for both suspend and resume */
543 	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
544 	add_timer(timer);
545 }
546 
547 /**
548  * dpm_watchdog_clear - Disable suspend/resume watchdog.
549  * @wd: Watchdog to disable.
550  */
551 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
552 {
553 	struct timer_list *timer = &wd->timer;
554 
555 	del_timer_sync(timer);
556 	destroy_timer_on_stack(timer);
557 }
558 #else
559 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
560 #define dpm_watchdog_set(x, y)
561 #define dpm_watchdog_clear(x)
562 #endif
563 
564 /*------------------------- Resume routines -------------------------*/
565 
566 /**
567  * dev_pm_skip_resume - System-wide device resume optimization check.
568  * @dev: Target device.
569  *
570  * Return:
571  * - %false if the transition under way is RESTORE.
572  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
573  * - The logical negation of %power.must_resume otherwise (that is, when the
574  *   transition under way is RESUME).
575  */
576 bool dev_pm_skip_resume(struct device *dev)
577 {
578 	if (pm_transition.event == PM_EVENT_RESTORE)
579 		return false;
580 
581 	if (pm_transition.event == PM_EVENT_THAW)
582 		return dev_pm_skip_suspend(dev);
583 
584 	return !dev->power.must_resume;
585 }
586 
587 static bool is_async(struct device *dev)
588 {
589 	return dev->power.async_suspend && pm_async_enabled
590 		&& !pm_trace_is_enabled();
591 }
592 
593 static bool dpm_async_fn(struct device *dev, async_func_t func)
594 {
595 	reinit_completion(&dev->power.completion);
596 
597 	if (is_async(dev)) {
598 		dev->power.async_in_progress = true;
599 
600 		get_device(dev);
601 
602 		if (async_schedule_dev_nocall(func, dev))
603 			return true;
604 
605 		put_device(dev);
606 	}
607 	/*
608 	 * Because async_schedule_dev_nocall() above has returned false or it
609 	 * has not been called at all, func() is not running and it is safe to
610 	 * update the async_in_progress flag without extra synchronization.
611 	 */
612 	dev->power.async_in_progress = false;
613 	return false;
614 }
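
/*
 * Illustrative sketch (driver name hypothetical): drivers opt a device into
 * the asynchronous path checked by is_async() above by calling
 * device_enable_async_suspend(), typically at probe time, and user space can
 * gate the mechanism globally via /sys/power/pm_async (reflected here as
 * pm_async_enabled).  A hypothetical probe might do:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		device_enable_async_suspend(&pdev->dev);
 *		return 0;
 *	}
 */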
615 
616 /**
617  * device_resume_noirq - Execute a "noirq resume" callback for given device.
618  * @dev: Device to handle.
619  * @state: PM transition of the system being carried out.
620  * @async: If true, the device is being resumed asynchronously.
621  *
622  * The driver of @dev will not receive interrupts while this function is being
623  * executed.
624  */
625 static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
626 {
627 	pm_callback_t callback = NULL;
628 	const char *info = NULL;
629 	bool skip_resume;
630 	int error = 0;
631 
632 	TRACE_DEVICE(dev);
633 	TRACE_RESUME(0);
634 
635 	if (dev->power.syscore || dev->power.direct_complete)
636 		goto Out;
637 
638 	if (!dev->power.is_noirq_suspended)
639 		goto Out;
640 
641 	if (!dpm_wait_for_superior(dev, async))
642 		goto Out;
643 
644 	skip_resume = dev_pm_skip_resume(dev);
645 	/*
646 	 * If the driver callback is skipped below or by the middle layer
647 	 * callback and device_resume_early() also skips the driver callback for
648 	 * this device later, it needs to appear as "suspended" to PM-runtime,
649 	 * so change its status accordingly.
650 	 *
651 	 * Otherwise, the device is going to be resumed, so set its PM-runtime
652 	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
653 	 * to avoid confusing drivers that don't use it.
654 	 */
655 	if (skip_resume)
656 		pm_runtime_set_suspended(dev);
657 	else if (dev_pm_skip_suspend(dev))
658 		pm_runtime_set_active(dev);
659 
660 	if (dev->pm_domain) {
661 		info = "noirq power domain ";
662 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
663 	} else if (dev->type && dev->type->pm) {
664 		info = "noirq type ";
665 		callback = pm_noirq_op(dev->type->pm, state);
666 	} else if (dev->class && dev->class->pm) {
667 		info = "noirq class ";
668 		callback = pm_noirq_op(dev->class->pm, state);
669 	} else if (dev->bus && dev->bus->pm) {
670 		info = "noirq bus ";
671 		callback = pm_noirq_op(dev->bus->pm, state);
672 	}
673 	if (callback)
674 		goto Run;
675 
676 	if (skip_resume)
677 		goto Skip;
678 
679 	if (dev->driver && dev->driver->pm) {
680 		info = "noirq driver ";
681 		callback = pm_noirq_op(dev->driver->pm, state);
682 	}
683 
684 Run:
685 	error = dpm_run_callback(callback, dev, state, info);
686 
687 Skip:
688 	dev->power.is_noirq_suspended = false;
689 
690 Out:
691 	complete_all(&dev->power.completion);
692 	TRACE_RESUME(error);
693 
694 	if (error) {
695 		async_error = error;
696 		dpm_save_failed_dev(dev_name(dev));
697 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
698 	}
699 }
700 
701 static void async_resume_noirq(void *data, async_cookie_t cookie)
702 {
703 	struct device *dev = data;
704 
705 	device_resume_noirq(dev, pm_transition, true);
706 	put_device(dev);
707 }
708 
709 static void dpm_noirq_resume_devices(pm_message_t state)
710 {
711 	struct device *dev;
712 	ktime_t starttime = ktime_get();
713 
714 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
715 
716 	async_error = 0;
717 	pm_transition = state;
718 
719 	mutex_lock(&dpm_list_mtx);
720 
721 	/*
722 	 * Trigger the resume of "async" devices upfront so they don't have to
723 	 * wait for the "non-async" ones they don't depend on.
724 	 */
725 	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
726 		dpm_async_fn(dev, async_resume_noirq);
727 
728 	while (!list_empty(&dpm_noirq_list)) {
729 		dev = to_device(dpm_noirq_list.next);
730 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
731 
732 		if (!dev->power.async_in_progress) {
733 			get_device(dev);
734 
735 			mutex_unlock(&dpm_list_mtx);
736 
737 			device_resume_noirq(dev, state, false);
738 
739 			put_device(dev);
740 
741 			mutex_lock(&dpm_list_mtx);
742 		}
743 	}
744 	mutex_unlock(&dpm_list_mtx);
745 	async_synchronize_full();
746 	dpm_show_time(starttime, state, 0, "noirq");
747 	if (async_error)
748 		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
749 
750 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
751 }
752 
753 /**
754  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
755  * @state: PM transition of the system being carried out.
756  *
757  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
758  * allow device drivers' interrupt handlers to be called.
759  */
760 void dpm_resume_noirq(pm_message_t state)
761 {
762 	dpm_noirq_resume_devices(state);
763 
764 	resume_device_irqs();
765 	device_wakeup_disarm_wake_irqs();
766 }
767 
768 /**
769  * device_resume_early - Execute an "early resume" callback for given device.
770  * @dev: Device to handle.
771  * @state: PM transition of the system being carried out.
772  * @async: If true, the device is being resumed asynchronously.
773  *
774  * Runtime PM is disabled for @dev while this function is being executed.
775  */
776 static void device_resume_early(struct device *dev, pm_message_t state, bool async)
777 {
778 	pm_callback_t callback = NULL;
779 	const char *info = NULL;
780 	int error = 0;
781 
782 	TRACE_DEVICE(dev);
783 	TRACE_RESUME(0);
784 
785 	if (dev->power.syscore || dev->power.direct_complete)
786 		goto Out;
787 
788 	if (!dev->power.is_late_suspended)
789 		goto Out;
790 
791 	if (!dpm_wait_for_superior(dev, async))
792 		goto Out;
793 
794 	if (dev->pm_domain) {
795 		info = "early power domain ";
796 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
797 	} else if (dev->type && dev->type->pm) {
798 		info = "early type ";
799 		callback = pm_late_early_op(dev->type->pm, state);
800 	} else if (dev->class && dev->class->pm) {
801 		info = "early class ";
802 		callback = pm_late_early_op(dev->class->pm, state);
803 	} else if (dev->bus && dev->bus->pm) {
804 		info = "early bus ";
805 		callback = pm_late_early_op(dev->bus->pm, state);
806 	}
807 	if (callback)
808 		goto Run;
809 
810 	if (dev_pm_skip_resume(dev))
811 		goto Skip;
812 
813 	if (dev->driver && dev->driver->pm) {
814 		info = "early driver ";
815 		callback = pm_late_early_op(dev->driver->pm, state);
816 	}
817 
818 Run:
819 	error = dpm_run_callback(callback, dev, state, info);
820 
821 Skip:
822 	dev->power.is_late_suspended = false;
823 
824 Out:
825 	TRACE_RESUME(error);
826 
827 	pm_runtime_enable(dev);
828 	complete_all(&dev->power.completion);
829 
830 	if (error) {
831 		async_error = error;
832 		dpm_save_failed_dev(dev_name(dev));
833 		pm_dev_err(dev, state, async ? " async early" : " early", error);
834 	}
835 }
836 
837 static void async_resume_early(void *data, async_cookie_t cookie)
838 {
839 	struct device *dev = data;
840 
841 	device_resume_early(dev, pm_transition, true);
842 	put_device(dev);
843 }
844 
845 /**
846  * dpm_resume_early - Execute "early resume" callbacks for all devices.
847  * @state: PM transition of the system being carried out.
848  */
849 void dpm_resume_early(pm_message_t state)
850 {
851 	struct device *dev;
852 	ktime_t starttime = ktime_get();
853 
854 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
855 
856 	async_error = 0;
857 	pm_transition = state;
858 
859 	mutex_lock(&dpm_list_mtx);
860 
861 	/*
862 	 * Trigger the resume of "async" devices upfront so they don't have to
863 	 * wait for the "non-async" ones they don't depend on.
864 	 */
865 	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
866 		dpm_async_fn(dev, async_resume_early);
867 
868 	while (!list_empty(&dpm_late_early_list)) {
869 		dev = to_device(dpm_late_early_list.next);
870 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
871 
872 		if (!dev->power.async_in_progress) {
873 			get_device(dev);
874 
875 			mutex_unlock(&dpm_list_mtx);
876 
877 			device_resume_early(dev, state, false);
878 
879 			put_device(dev);
880 
881 			mutex_lock(&dpm_list_mtx);
882 		}
883 	}
884 	mutex_unlock(&dpm_list_mtx);
885 	async_synchronize_full();
886 	dpm_show_time(starttime, state, 0, "early");
887 	if (async_error)
888 		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
889 
890 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
891 }
892 
893 /**
894  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
895  * @state: PM transition of the system being carried out.
896  */
897 void dpm_resume_start(pm_message_t state)
898 {
899 	dpm_resume_noirq(state);
900 	dpm_resume_early(state);
901 }
902 EXPORT_SYMBOL_GPL(dpm_resume_start);
903 
904 /**
905  * device_resume - Execute "resume" callbacks for given device.
906  * @dev: Device to handle.
907  * @state: PM transition of the system being carried out.
908  * @async: If true, the device is being resumed asynchronously.
909  */
910 static void device_resume(struct device *dev, pm_message_t state, bool async)
911 {
912 	pm_callback_t callback = NULL;
913 	const char *info = NULL;
914 	int error = 0;
915 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
916 
917 	TRACE_DEVICE(dev);
918 	TRACE_RESUME(0);
919 
920 	if (dev->power.syscore)
921 		goto Complete;
922 
923 	if (!dev->power.is_suspended)
924 		goto Complete;
925 
926 	dev->power.is_suspended = false;
927 
928 	if (dev->power.direct_complete) {
929 		/* Match the pm_runtime_disable() in __device_suspend(). */
930 		pm_runtime_enable(dev);
931 		goto Complete;
932 	}
933 
934 	if (!dpm_wait_for_superior(dev, async))
935 		goto Complete;
936 
937 	dpm_watchdog_set(&wd, dev);
938 	device_lock(dev);
939 
940 	/*
941 	 * This is a fib.  But we'll allow new children to be added below
942 	 * a resumed device, even if the device hasn't been completed yet.
943 	 */
944 	dev->power.is_prepared = false;
945 
946 	if (dev->pm_domain) {
947 		info = "power domain ";
948 		callback = pm_op(&dev->pm_domain->ops, state);
949 		goto Driver;
950 	}
951 
952 	if (dev->type && dev->type->pm) {
953 		info = "type ";
954 		callback = pm_op(dev->type->pm, state);
955 		goto Driver;
956 	}
957 
958 	if (dev->class && dev->class->pm) {
959 		info = "class ";
960 		callback = pm_op(dev->class->pm, state);
961 		goto Driver;
962 	}
963 
964 	if (dev->bus) {
965 		if (dev->bus->pm) {
966 			info = "bus ";
967 			callback = pm_op(dev->bus->pm, state);
968 		} else if (dev->bus->resume) {
969 			info = "legacy bus ";
970 			callback = dev->bus->resume;
971 			goto End;
972 		}
973 	}
974 
975  Driver:
976 	if (!callback && dev->driver && dev->driver->pm) {
977 		info = "driver ";
978 		callback = pm_op(dev->driver->pm, state);
979 	}
980 
981  End:
982 	error = dpm_run_callback(callback, dev, state, info);
983 
984 	device_unlock(dev);
985 	dpm_watchdog_clear(&wd);
986 
987  Complete:
988 	complete_all(&dev->power.completion);
989 
990 	TRACE_RESUME(error);
991 
992 	if (error) {
993 		async_error = error;
994 		dpm_save_failed_dev(dev_name(dev));
995 		pm_dev_err(dev, state, async ? " async" : "", error);
996 	}
997 }
998 
999 static void async_resume(void *data, async_cookie_t cookie)
1000 {
1001 	struct device *dev = data;
1002 
1003 	device_resume(dev, pm_transition, true);
1004 	put_device(dev);
1005 }
1006 
1007 /**
1008  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1009  * @state: PM transition of the system being carried out.
1010  *
1011  * Execute the appropriate "resume" callback for all devices whose status
1012  * indicates that they are suspended.
1013  */
1014 void dpm_resume(pm_message_t state)
1015 {
1016 	struct device *dev;
1017 	ktime_t starttime = ktime_get();
1018 
1019 	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1020 	might_sleep();
1021 
1022 	pm_transition = state;
1023 	async_error = 0;
1024 
1025 	mutex_lock(&dpm_list_mtx);
1026 
1027 	/*
1028 	 * Trigger the resume of "async" devices upfront so they don't have to
1029 	 * wait for the "non-async" ones they don't depend on.
1030 	 */
1031 	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1032 		dpm_async_fn(dev, async_resume);
1033 
1034 	while (!list_empty(&dpm_suspended_list)) {
1035 		dev = to_device(dpm_suspended_list.next);
1036 		list_move_tail(&dev->power.entry, &dpm_prepared_list);
1037 
1038 		if (!dev->power.async_in_progress) {
1039 			get_device(dev);
1040 
1041 			mutex_unlock(&dpm_list_mtx);
1042 
1043 			device_resume(dev, state, false);
1044 
1045 			put_device(dev);
1046 
1047 			mutex_lock(&dpm_list_mtx);
1048 		}
1049 	}
1050 	mutex_unlock(&dpm_list_mtx);
1051 	async_synchronize_full();
1052 	dpm_show_time(starttime, state, 0, NULL);
1053 	if (async_error)
1054 		dpm_save_failed_step(SUSPEND_RESUME);
1055 
1056 	cpufreq_resume();
1057 	devfreq_resume();
1058 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1059 }
1060 
1061 /**
1062  * device_complete - Complete a PM transition for given device.
1063  * @dev: Device to handle.
1064  * @state: PM transition of the system being carried out.
1065  */
1066 static void device_complete(struct device *dev, pm_message_t state)
1067 {
1068 	void (*callback)(struct device *) = NULL;
1069 	const char *info = NULL;
1070 
1071 	if (dev->power.syscore)
1072 		goto out;
1073 
1074 	device_lock(dev);
1075 
1076 	if (dev->pm_domain) {
1077 		info = "completing power domain ";
1078 		callback = dev->pm_domain->ops.complete;
1079 	} else if (dev->type && dev->type->pm) {
1080 		info = "completing type ";
1081 		callback = dev->type->pm->complete;
1082 	} else if (dev->class && dev->class->pm) {
1083 		info = "completing class ";
1084 		callback = dev->class->pm->complete;
1085 	} else if (dev->bus && dev->bus->pm) {
1086 		info = "completing bus ";
1087 		callback = dev->bus->pm->complete;
1088 	}
1089 
1090 	if (!callback && dev->driver && dev->driver->pm) {
1091 		info = "completing driver ";
1092 		callback = dev->driver->pm->complete;
1093 	}
1094 
1095 	if (callback) {
1096 		pm_dev_dbg(dev, state, info);
1097 		callback(dev);
1098 	}
1099 
1100 	device_unlock(dev);
1101 
1102 out:
1103 	pm_runtime_put(dev);
1104 }
1105 
1106 /**
1107  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1108  * @state: PM transition of the system being carried out.
1109  *
1110  * Execute the ->complete() callbacks for all devices whose PM status is not
1111  * DPM_ON (this allows new devices to be registered).
1112  */
1113 void dpm_complete(pm_message_t state)
1114 {
1115 	struct list_head list;
1116 
1117 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1118 	might_sleep();
1119 
1120 	INIT_LIST_HEAD(&list);
1121 	mutex_lock(&dpm_list_mtx);
1122 	while (!list_empty(&dpm_prepared_list)) {
1123 		struct device *dev = to_device(dpm_prepared_list.prev);
1124 
1125 		get_device(dev);
1126 		dev->power.is_prepared = false;
1127 		list_move(&dev->power.entry, &list);
1128 
1129 		mutex_unlock(&dpm_list_mtx);
1130 
1131 		trace_device_pm_callback_start(dev, "", state.event);
1132 		device_complete(dev, state);
1133 		trace_device_pm_callback_end(dev, 0);
1134 
1135 		put_device(dev);
1136 
1137 		mutex_lock(&dpm_list_mtx);
1138 	}
1139 	list_splice(&list, &dpm_list);
1140 	mutex_unlock(&dpm_list_mtx);
1141 
1142 	/* Allow device probing and trigger re-probing of deferred devices */
1143 	device_unblock_probing();
1144 	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1145 }
1146 
1147 /**
1148  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1149  * @state: PM transition of the system being carried out.
1150  *
1151  * Execute "resume" callbacks for all devices and complete the PM transition of
1152  * the system.
1153  */
1154 void dpm_resume_end(pm_message_t state)
1155 {
1156 	dpm_resume(state);
1157 	dpm_complete(state);
1158 }
1159 EXPORT_SYMBOL_GPL(dpm_resume_end);
1160 
1161 
1162 /*------------------------- Suspend routines -------------------------*/
1163 
1164 /**
1165  * resume_event - Return a "resume" message for given "suspend" sleep state.
1166  * @sleep_state: PM message representing a sleep state.
1167  *
1168  * Return a PM message representing the resume event corresponding to given
1169  * sleep state.
1170  */
1171 static pm_message_t resume_event(pm_message_t sleep_state)
1172 {
1173 	switch (sleep_state.event) {
1174 	case PM_EVENT_SUSPEND:
1175 		return PMSG_RESUME;
1176 	case PM_EVENT_FREEZE:
1177 	case PM_EVENT_QUIESCE:
1178 		return PMSG_RECOVER;
1179 	case PM_EVENT_HIBERNATE:
1180 		return PMSG_RESTORE;
1181 	}
1182 	return PMSG_ON;
1183 }
1184 
1185 static void dpm_superior_set_must_resume(struct device *dev)
1186 {
1187 	struct device_link *link;
1188 	int idx;
1189 
1190 	if (dev->parent)
1191 		dev->parent->power.must_resume = true;
1192 
1193 	idx = device_links_read_lock();
1194 
1195 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1196 		link->supplier->power.must_resume = true;
1197 
1198 	device_links_read_unlock(idx);
1199 }
1200 
1201 /**
1202  * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1203  * @dev: Device to handle.
1204  * @state: PM transition of the system being carried out.
1205  * @async: If true, the device is being suspended asynchronously.
1206  *
1207  * The driver of @dev will not receive interrupts while this function is being
1208  * executed.
1209  */
1210 static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1211 {
1212 	pm_callback_t callback = NULL;
1213 	const char *info = NULL;
1214 	int error = 0;
1215 
1216 	TRACE_DEVICE(dev);
1217 	TRACE_SUSPEND(0);
1218 
1219 	dpm_wait_for_subordinate(dev, async);
1220 
1221 	if (async_error)
1222 		goto Complete;
1223 
1224 	if (dev->power.syscore || dev->power.direct_complete)
1225 		goto Complete;
1226 
1227 	if (dev->pm_domain) {
1228 		info = "noirq power domain ";
1229 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1230 	} else if (dev->type && dev->type->pm) {
1231 		info = "noirq type ";
1232 		callback = pm_noirq_op(dev->type->pm, state);
1233 	} else if (dev->class && dev->class->pm) {
1234 		info = "noirq class ";
1235 		callback = pm_noirq_op(dev->class->pm, state);
1236 	} else if (dev->bus && dev->bus->pm) {
1237 		info = "noirq bus ";
1238 		callback = pm_noirq_op(dev->bus->pm, state);
1239 	}
1240 	if (callback)
1241 		goto Run;
1242 
1243 	if (dev_pm_skip_suspend(dev))
1244 		goto Skip;
1245 
1246 	if (dev->driver && dev->driver->pm) {
1247 		info = "noirq driver ";
1248 		callback = pm_noirq_op(dev->driver->pm, state);
1249 	}
1250 
1251 Run:
1252 	error = dpm_run_callback(callback, dev, state, info);
1253 	if (error) {
1254 		async_error = error;
1255 		log_suspend_abort_reason("Device %s failed to %s noirq: error %d",
1256 					 dev_name(dev), pm_verb(state.event), error);
1257 		dpm_save_failed_dev(dev_name(dev));
1258 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
1259 		goto Complete;
1260 	}
1261 
1262 Skip:
1263 	dev->power.is_noirq_suspended = true;
1264 
1265 	/*
1266 	 * Devices must be resumed unless they are explicitly allowed to be left
1267 	 * in suspend, but even in that case skipping the resume of devices that
1268 	 * were in use right before the system suspend (as indicated by their
1269 	 * runtime PM usage counters and child counters) would be suboptimal.
1270 	 */
1271 	if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1272 	      dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
1273 		dev->power.must_resume = true;
1274 
1275 	if (dev->power.must_resume)
1276 		dpm_superior_set_must_resume(dev);
1277 
1278 Complete:
1279 	complete_all(&dev->power.completion);
1280 	TRACE_SUSPEND(error);
1281 	return error;
1282 }
1283 
1284 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1285 {
1286 	struct device *dev = data;
1287 
1288 	device_suspend_noirq(dev, pm_transition, true);
1289 	put_device(dev);
1290 }
1291 
1292 static int dpm_noirq_suspend_devices(pm_message_t state)
1293 {
1294 	ktime_t starttime = ktime_get();
1295 	int error = 0;
1296 
1297 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1298 
1299 	pm_transition = state;
1300 	async_error = 0;
1301 
1302 	mutex_lock(&dpm_list_mtx);
1303 
1304 	while (!list_empty(&dpm_late_early_list)) {
1305 		struct device *dev = to_device(dpm_late_early_list.prev);
1306 
1307 		list_move(&dev->power.entry, &dpm_noirq_list);
1308 
1309 		if (dpm_async_fn(dev, async_suspend_noirq))
1310 			continue;
1311 
1312 		get_device(dev);
1313 
1314 		mutex_unlock(&dpm_list_mtx);
1315 
1316 		error = device_suspend_noirq(dev, state, false);
1317 
1318 		put_device(dev);
1319 
1320 		mutex_lock(&dpm_list_mtx);
1321 
1322 		if (error || async_error)
1323 			break;
1324 	}
1325 
1326 	mutex_unlock(&dpm_list_mtx);
1327 
1328 	async_synchronize_full();
1329 	if (!error)
1330 		error = async_error;
1331 
1332 	if (error)
1333 		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1334 
1335 	dpm_show_time(starttime, state, error, "noirq");
1336 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1337 	return error;
1338 }
1339 
1340 /**
1341  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1342  * @state: PM transition of the system being carried out.
1343  *
1344  * Prevent device drivers' interrupt handlers from being called and invoke
1345  * "noirq" suspend callbacks for all non-sysdev devices.
1346  */
1347 int dpm_suspend_noirq(pm_message_t state)
1348 {
1349 	int ret;
1350 
1351 	device_wakeup_arm_wake_irqs();
1352 	suspend_device_irqs();
1353 
1354 	ret = dpm_noirq_suspend_devices(state);
1355 	if (ret)
1356 		dpm_resume_noirq(resume_event(state));
1357 
1358 	return ret;
1359 }
1360 
1361 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1362 {
1363 	struct device *parent = dev->parent;
1364 
1365 	if (!parent)
1366 		return;
1367 
1368 	spin_lock_irq(&parent->power.lock);
1369 
1370 	if (device_wakeup_path(dev) && !parent->power.ignore_children)
1371 		parent->power.wakeup_path = true;
1372 
1373 	spin_unlock_irq(&parent->power.lock);
1374 }
1375 
1376 /**
1377  * device_suspend_late - Execute a "late suspend" callback for given device.
1378  * @dev: Device to handle.
1379  * @state: PM transition of the system being carried out.
1380  * @async: If true, the device is being suspended asynchronously.
1381  *
1382  * Runtime PM is disabled for @dev while this function is being executed.
1383  */
1384 static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
1385 {
1386 	pm_callback_t callback = NULL;
1387 	const char *info = NULL;
1388 	int error = 0;
1389 
1390 	TRACE_DEVICE(dev);
1391 	TRACE_SUSPEND(0);
1392 
1393 	__pm_runtime_disable(dev, false);
1394 
1395 	dpm_wait_for_subordinate(dev, async);
1396 
1397 	if (async_error)
1398 		goto Complete;
1399 
1400 	if (pm_wakeup_pending()) {
1401 		async_error = -EBUSY;
1402 		goto Complete;
1403 	}
1404 
1405 	if (dev->power.syscore || dev->power.direct_complete)
1406 		goto Complete;
1407 
1408 	if (dev->pm_domain) {
1409 		info = "late power domain ";
1410 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1411 	} else if (dev->type && dev->type->pm) {
1412 		info = "late type ";
1413 		callback = pm_late_early_op(dev->type->pm, state);
1414 	} else if (dev->class && dev->class->pm) {
1415 		info = "late class ";
1416 		callback = pm_late_early_op(dev->class->pm, state);
1417 	} else if (dev->bus && dev->bus->pm) {
1418 		info = "late bus ";
1419 		callback = pm_late_early_op(dev->bus->pm, state);
1420 	}
1421 	if (callback)
1422 		goto Run;
1423 
1424 	if (dev_pm_skip_suspend(dev))
1425 		goto Skip;
1426 
1427 	if (dev->driver && dev->driver->pm) {
1428 		info = "late driver ";
1429 		callback = pm_late_early_op(dev->driver->pm, state);
1430 	}
1431 
1432 Run:
1433 	error = dpm_run_callback(callback, dev, state, info);
1434 	if (error) {
1435 		async_error = error;
1436 		log_suspend_abort_reason("Device %s failed to %s late: error %d",
1437 					 dev_name(dev), pm_verb(state.event), error);
1438 		dpm_save_failed_dev(dev_name(dev));
1439 		pm_dev_err(dev, state, async ? " async late" : " late", error);
1440 		goto Complete;
1441 	}
1442 	dpm_propagate_wakeup_to_parent(dev);
1443 
1444 Skip:
1445 	dev->power.is_late_suspended = true;
1446 
1447 Complete:
1448 	TRACE_SUSPEND(error);
1449 	complete_all(&dev->power.completion);
1450 	return error;
1451 }
1452 
1453 static void async_suspend_late(void *data, async_cookie_t cookie)
1454 {
1455 	struct device *dev = data;
1456 
1457 	device_suspend_late(dev, pm_transition, true);
1458 	put_device(dev);
1459 }
1460 
1461 /**
1462  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1463  * @state: PM transition of the system being carried out.
1464  */
1465 int dpm_suspend_late(pm_message_t state)
1466 {
1467 	ktime_t starttime = ktime_get();
1468 	int error = 0;
1469 
1470 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1471 
1472 	pm_transition = state;
1473 	async_error = 0;
1474 
1475 	wake_up_all_idle_cpus();
1476 
1477 	mutex_lock(&dpm_list_mtx);
1478 
1479 	while (!list_empty(&dpm_suspended_list)) {
1480 		struct device *dev = to_device(dpm_suspended_list.prev);
1481 
1482 		list_move(&dev->power.entry, &dpm_late_early_list);
1483 
1484 		if (dpm_async_fn(dev, async_suspend_late))
1485 			continue;
1486 
1487 		get_device(dev);
1488 
1489 		mutex_unlock(&dpm_list_mtx);
1490 
1491 		error = device_suspend_late(dev, state, false);
1492 
1493 		put_device(dev);
1494 
1495 		mutex_lock(&dpm_list_mtx);
1496 
1497 		if (error || async_error)
1498 			break;
1499 	}
1500 
1501 	mutex_unlock(&dpm_list_mtx);
1502 
1503 	async_synchronize_full();
1504 	if (!error)
1505 		error = async_error;
1506 
1507 	if (error) {
1508 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1509 		dpm_resume_early(resume_event(state));
1510 	}
1511 	dpm_show_time(starttime, state, error, "late");
1512 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1513 	return error;
1514 }
1515 
1516 /**
1517  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1518  * @state: PM transition of the system being carried out.
1519  */
1520 int dpm_suspend_end(pm_message_t state)
1521 {
1522 	ktime_t starttime = ktime_get();
1523 	int error;
1524 
1525 	error = dpm_suspend_late(state);
1526 	if (error)
1527 		goto out;
1528 
1529 	error = dpm_suspend_noirq(state);
1530 	if (error)
1531 		dpm_resume_early(resume_event(state));
1532 
1533 out:
1534 	dpm_show_time(starttime, state, error, "end");
1535 	return error;
1536 }
1537 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1538 
1539 /**
1540  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1541  * @dev: Device to suspend.
1542  * @state: PM transition of the system being carried out.
1543  * @cb: Suspend callback to execute.
1544  * @info: string description of caller.
1545  */
1546 static int legacy_suspend(struct device *dev, pm_message_t state,
1547 			  int (*cb)(struct device *dev, pm_message_t state),
1548 			  const char *info)
1549 {
1550 	int error;
1551 	ktime_t calltime;
1552 
1553 	calltime = initcall_debug_start(dev, cb);
1554 
1555 	trace_device_pm_callback_start(dev, info, state.event);
1556 	error = cb(dev, state);
1557 	trace_device_pm_callback_end(dev, error);
1558 	suspend_report_result(dev, cb, error);
1559 
1560 	initcall_debug_report(dev, calltime, cb, error);
1561 
1562 	return error;
1563 }
1564 
1565 static void dpm_clear_superiors_direct_complete(struct device *dev)
1566 {
1567 	struct device_link *link;
1568 	int idx;
1569 
1570 	if (dev->parent) {
1571 		spin_lock_irq(&dev->parent->power.lock);
1572 		dev->parent->power.direct_complete = false;
1573 		spin_unlock_irq(&dev->parent->power.lock);
1574 	}
1575 
1576 	idx = device_links_read_lock();
1577 
1578 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1579 		spin_lock_irq(&link->supplier->power.lock);
1580 		link->supplier->power.direct_complete = false;
1581 		spin_unlock_irq(&link->supplier->power.lock);
1582 	}
1583 
1584 	device_links_read_unlock(idx);
1585 }
1586 
1587 /**
1588  * device_suspend - Execute "suspend" callbacks for given device.
1589  * @dev: Device to handle.
1590  * @state: PM transition of the system being carried out.
1591  * @async: If true, the device is being suspended asynchronously.
1592  */
1593 static int device_suspend(struct device *dev, pm_message_t state, bool async)
1594 {
1595 	pm_callback_t callback = NULL;
1596 	const char *info = NULL;
1597 	int error = 0;
1598 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1599 
1600 	TRACE_DEVICE(dev);
1601 	TRACE_SUSPEND(0);
1602 
1603 	dpm_wait_for_subordinate(dev, async);
1604 
1605 	if (async_error) {
1606 		dev->power.direct_complete = false;
1607 		goto Complete;
1608 	}
1609 
1610 	/*
1611 	 * Wait for possible runtime PM transitions of the device in progress
1612 	 * to complete and if there's a runtime resume request pending for it,
1613 	 * resume it before proceeding with invoking the system-wide suspend
1614 	 * callbacks for it.
1615 	 *
1616 	 * If the system-wide suspend callbacks below change the configuration
1617 	 * of the device, they must disable runtime PM for it or otherwise
1618 	 * ensure that its runtime-resume callbacks will not be confused by that
1619 	 * change in case they are invoked going forward.
1620 	 */
1621 	pm_runtime_barrier(dev);
1622 
1623 	if (pm_wakeup_pending()) {
1624 		dev->power.direct_complete = false;
1625 		async_error = -EBUSY;
1626 		goto Complete;
1627 	}
1628 
1629 	if (dev->power.syscore)
1630 		goto Complete;
1631 
1632 	/* Avoid direct_complete to let wakeup_path propagate. */
1633 	if (device_may_wakeup(dev) || device_wakeup_path(dev))
1634 		dev->power.direct_complete = false;
1635 
1636 	if (dev->power.direct_complete) {
1637 		if (pm_runtime_status_suspended(dev)) {
1638 			pm_runtime_disable(dev);
1639 			if (pm_runtime_status_suspended(dev)) {
1640 				pm_dev_dbg(dev, state, "direct-complete ");
1641 				dev->power.is_suspended = true;
1642 				goto Complete;
1643 			}
1644 
1645 			pm_runtime_enable(dev);
1646 		}
1647 		dev->power.direct_complete = false;
1648 	}
1649 
1650 	dev->power.may_skip_resume = true;
1651 	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1652 
1653 	dpm_watchdog_set(&wd, dev);
1654 	device_lock(dev);
1655 
1656 	if (dev->pm_domain) {
1657 		info = "power domain ";
1658 		callback = pm_op(&dev->pm_domain->ops, state);
1659 		goto Run;
1660 	}
1661 
1662 	if (dev->type && dev->type->pm) {
1663 		info = "type ";
1664 		callback = pm_op(dev->type->pm, state);
1665 		goto Run;
1666 	}
1667 
1668 	if (dev->class && dev->class->pm) {
1669 		info = "class ";
1670 		callback = pm_op(dev->class->pm, state);
1671 		goto Run;
1672 	}
1673 
1674 	if (dev->bus) {
1675 		if (dev->bus->pm) {
1676 			info = "bus ";
1677 			callback = pm_op(dev->bus->pm, state);
1678 		} else if (dev->bus->suspend) {
1679 			pm_dev_dbg(dev, state, "legacy bus ");
1680 			error = legacy_suspend(dev, state, dev->bus->suspend,
1681 						"legacy bus ");
1682 			goto End;
1683 		}
1684 	}
1685 
1686  Run:
1687 	if (!callback && dev->driver && dev->driver->pm) {
1688 		info = "driver ";
1689 		callback = pm_op(dev->driver->pm, state);
1690 	}
1691 
1692 	error = dpm_run_callback(callback, dev, state, info);
1693 
1694  End:
1695 	if (!error) {
1696 		dev->power.is_suspended = true;
1697 		if (device_may_wakeup(dev))
1698 			dev->power.wakeup_path = true;
1699 
1700 		dpm_propagate_wakeup_to_parent(dev);
1701 		dpm_clear_superiors_direct_complete(dev);
1702 	} else {
1703 		log_suspend_abort_reason("Device %s failed to %s: error %d",
1704 					 dev_name(dev), pm_verb(state.event), error);
1705 	}
1706 
1707 	device_unlock(dev);
1708 	dpm_watchdog_clear(&wd);
1709 
1710  Complete:
1711 	if (error) {
1712 		async_error = error;
1713 		dpm_save_failed_dev(dev_name(dev));
1714 		pm_dev_err(dev, state, async ? " async" : "", error);
1715 	}
1716 
1717 	complete_all(&dev->power.completion);
1718 	TRACE_SUSPEND(error);
1719 	return error;
1720 }
1721 
1722 static void async_suspend(void *data, async_cookie_t cookie)
1723 {
1724 	struct device *dev = data;
1725 
1726 	device_suspend(dev, pm_transition, true);
1727 	put_device(dev);
1728 }
1729 
1730 /**
1731  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1732  * @state: PM transition of the system being carried out.
1733  */
1734 int dpm_suspend(pm_message_t state)
1735 {
1736 	ktime_t starttime = ktime_get();
1737 	int error = 0;
1738 
1739 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1740 	might_sleep();
1741 
1742 	devfreq_suspend();
1743 	cpufreq_suspend();
1744 
1745 	pm_transition = state;
1746 	async_error = 0;
1747 
1748 	mutex_lock(&dpm_list_mtx);
1749 
1750 	while (!list_empty(&dpm_prepared_list)) {
1751 		struct device *dev = to_device(dpm_prepared_list.prev);
1752 
1753 		list_move(&dev->power.entry, &dpm_suspended_list);
1754 
1755 		if (dpm_async_fn(dev, async_suspend))
1756 			continue;
1757 
1758 		get_device(dev);
1759 
1760 		mutex_unlock(&dpm_list_mtx);
1761 
1762 		error = device_suspend(dev, state, false);
1763 
1764 		put_device(dev);
1765 
1766 		mutex_lock(&dpm_list_mtx);
1767 
1768 		if (error || async_error)
1769 			break;
1770 	}
1771 
1772 	mutex_unlock(&dpm_list_mtx);
1773 
1774 	async_synchronize_full();
1775 	if (!error)
1776 		error = async_error;
1777 
1778 	if (error)
1779 		dpm_save_failed_step(SUSPEND_SUSPEND);
1780 
1781 	dpm_show_time(starttime, state, error, NULL);
1782 	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1783 	return error;
1784 }
1785 
1786 /**
1787  * device_prepare - Prepare a device for system power transition.
1788  * @dev: Device to handle.
1789  * @state: PM transition of the system being carried out.
1790  *
1791  * Execute the ->prepare() callback(s) for given device.  No new children of the
1792  * device may be registered after this function has returned.
1793  */
1794 static int device_prepare(struct device *dev, pm_message_t state)
1795 {
1796 	int (*callback)(struct device *) = NULL;
1797 	int ret = 0;
1798 
1799 	/*
1800 	 * If a device's parent goes into runtime suspend at the wrong time,
1801 	 * it won't be possible to resume the device.  To prevent this we
1802 	 * block runtime suspend here, during the prepare phase, and allow
1803 	 * it again during the complete phase.
1804 	 */
1805 	pm_runtime_get_noresume(dev);
1806 
1807 	if (dev->power.syscore)
1808 		return 0;
1809 
1810 	device_lock(dev);
1811 
1812 	dev->power.wakeup_path = false;
1813 
1814 	if (dev->power.no_pm_callbacks)
1815 		goto unlock;
1816 
1817 	if (dev->pm_domain)
1818 		callback = dev->pm_domain->ops.prepare;
1819 	else if (dev->type && dev->type->pm)
1820 		callback = dev->type->pm->prepare;
1821 	else if (dev->class && dev->class->pm)
1822 		callback = dev->class->pm->prepare;
1823 	else if (dev->bus && dev->bus->pm)
1824 		callback = dev->bus->pm->prepare;
1825 
1826 	if (!callback && dev->driver && dev->driver->pm)
1827 		callback = dev->driver->pm->prepare;
1828 
1829 	if (callback)
1830 		ret = callback(dev);
1831 
1832 unlock:
1833 	device_unlock(dev);
1834 
1835 	if (ret < 0) {
1836 		suspend_report_result(dev, callback, ret);
1837 		pm_runtime_put(dev);
1838 		return ret;
1839 	}
1840 	/*
1841 	 * A positive return value from ->prepare() means "this device appears
1842 	 * to be runtime-suspended and its state is fine, so if it really is
1843 	 * runtime-suspended, you can leave it in that state provided that you
1844 	 * will do the same thing with all of its descendants".  This only
1845 	 * applies to suspend transitions, however.
1846 	 */
1847 	spin_lock_irq(&dev->power.lock);
1848 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1849 		(ret > 0 || dev->power.no_pm_callbacks) &&
1850 		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1851 	spin_unlock_irq(&dev->power.lock);
1852 	return 0;
1853 }
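
/*
 * Illustrative sketch of how a driver can interact with the direct_complete
 * optimization set up above (driver names hypothetical): a ->prepare()
 * callback that returns a positive value when the device is already
 * runtime-suspended allows the core to leave it that way, while setting
 * DPM_FLAG_NO_DIRECT_COMPLETE opts the device out of the optimization:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);
 *	}
 *
 *	static int bar_probe(struct device *dev)
 *	{
 *		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
 *		return 0;
 *	}
 */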
1854 
1855 /**
1856  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1857  * @state: PM transition of the system being carried out.
1858  *
1859  * Execute the ->prepare() callback(s) for all devices.
1860  */
1861 int dpm_prepare(pm_message_t state)
1862 {
1863 	int error = 0;
1864 
1865 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1866 	might_sleep();
1867 
1868 	/*
1869 	 * Give the known devices a chance to complete their probes before
1870 	 * device probing is disabled. This sync point is important at least
1871 	 * at boot time and during hibernation restore.
1872 	 */
1873 	trace_android_rvh_dpm_prepare(0);
1874 	wait_for_device_probe();
1875 	trace_android_rvh_dpm_prepare(1);
1876 	/*
1877 	 * It is unsafe if device probing happens during suspend or
1878 	 * hibernation, because system behavior would be unpredictable in that
1879 	 * case. So prohibit device probing here and defer probes instead.
1880 	 * The normal behavior will be restored in dpm_complete().
1881 	 */
1882 	device_block_probing();
1883 
1884 	mutex_lock(&dpm_list_mtx);
1885 	while (!list_empty(&dpm_list) && !error) {
1886 		struct device *dev = to_device(dpm_list.next);
1887 
1888 		get_device(dev);
1889 
1890 		mutex_unlock(&dpm_list_mtx);
1891 
1892 		trace_device_pm_callback_start(dev, "", state.event);
1893 		error = device_prepare(dev, state);
1894 		trace_device_pm_callback_end(dev, error);
1895 
1896 		mutex_lock(&dpm_list_mtx);
1897 
1898 		if (!error) {
1899 			dev->power.is_prepared = true;
1900 			if (!list_empty(&dev->power.entry))
1901 				list_move_tail(&dev->power.entry, &dpm_prepared_list);
1902 		} else if (error == -EAGAIN) {
1903 			error = 0;
1904 		} else {
1905 			dev_info(dev, "not prepared for power transition: code %d\n",
1906 				 error);
1907 			log_suspend_abort_reason("Device %s not prepared for power transition: code %d",
1908 						 dev_name(dev), error);
1909 			dpm_save_failed_dev(dev_name(dev));
1910 		}
1911 
1912 		mutex_unlock(&dpm_list_mtx);
1913 
1914 		put_device(dev);
1915 
1916 		mutex_lock(&dpm_list_mtx);
1917 	}
1918 	mutex_unlock(&dpm_list_mtx);
1919 	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1920 	return error;
1921 }
1922 
1923 /**
1924  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1925  * @state: PM transition of the system being carried out.
1926  *
1927  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1928  * callbacks for them.
1929  */
1930 int dpm_suspend_start(pm_message_t state)
1931 {
1932 	ktime_t starttime = ktime_get();
1933 	int error;
1934 
1935 	error = dpm_prepare(state);
1936 	if (error)
1937 		dpm_save_failed_step(SUSPEND_PREPARE);
1938 	else
1939 		error = dpm_suspend(state);
1940 
1941 	dpm_show_time(starttime, state, error, "start");
1942 	return error;
1943 }
1944 EXPORT_SYMBOL_GPL(dpm_suspend_start);
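
/*
 * Illustrative sketch of how a caller might sequence the exported phase
 * helpers for a complete transition (error unwinding trimmed; the low-power
 * entry step is hypothetical and platform-specific):
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);	// prepare + suspend
 *	if (!error) {
 *		error = dpm_suspend_end(PMSG_SUSPEND);	// late + noirq
 *		if (!error) {
 *			enter_low_power_state();
 *			dpm_resume_start(PMSG_RESUME);	// noirq + early
 *		}
 *	}
 *	dpm_resume_end(PMSG_RESUME);			// resume + complete
 */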
1945 
1946 void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
1947 {
1948 	if (ret)
1949 		dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
1950 }
1951 EXPORT_SYMBOL_GPL(__suspend_report_result);
1952 
1953 /**
1954  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1955  * @subordinate: Device that needs to wait for @dev.
1956  * @dev: Device to wait for.
1957  */
1958 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1959 {
1960 	dpm_wait(dev, subordinate->power.async_suspend);
1961 	return async_error;
1962 }
1963 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
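
/*
 * Illustrative sketch (names hypothetical): a driver with a dependency that
 * the device hierarchy does not express can call device_pm_wait_for_dev()
 * from its own callback, so an async resume does not run ahead of the device
 * it depends on:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *priv = dev_get_drvdata(dev);
 *
 *		device_pm_wait_for_dev(dev, priv->companion);
 *		return foo_hw_reinit(priv);
 *	}
 */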
1964 
1965 /**
1966  * dpm_for_each_dev - device iterator.
1967  * @data: data for the callback.
1968  * @fn: function to be called for each device.
1969  *
1970  * Iterate over devices in dpm_list, and call @fn for each device,
1971  * passing it @data.
1972  */
1973 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1974 {
1975 	struct device *dev;
1976 
1977 	if (!fn)
1978 		return;
1979 
1980 	device_pm_lock();
1981 	list_for_each_entry(dev, &dpm_list, power.entry)
1982 		fn(dev, data);
1983 	device_pm_unlock();
1984 }
1985 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
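
/*
 * Illustrative sketch: counting the devices currently on dpm_list with
 * dpm_for_each_dev() (callback name hypothetical):
 *
 *	static void count_one(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	dpm_for_each_dev(&count, count_one);
 */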
1986 
1987 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1988 {
1989 	if (!ops)
1990 		return true;
1991 
1992 	return !ops->prepare &&
1993 	       !ops->suspend &&
1994 	       !ops->suspend_late &&
1995 	       !ops->suspend_noirq &&
1996 	       !ops->resume_noirq &&
1997 	       !ops->resume_early &&
1998 	       !ops->resume &&
1999 	       !ops->complete;
2000 }
2001 
2002 void device_pm_check_callbacks(struct device *dev)
2003 {
2004 	unsigned long flags;
2005 
2006 	spin_lock_irqsave(&dev->power.lock, flags);
2007 	dev->power.no_pm_callbacks =
2008 		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2009 		 !dev->bus->suspend && !dev->bus->resume)) &&
2010 		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2011 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2012 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2013 		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2014 		 !dev->driver->suspend && !dev->driver->resume));
2015 	spin_unlock_irqrestore(&dev->power.lock, flags);
2016 }
2017 
2018 bool dev_pm_skip_suspend(struct device *dev)
2019 {
2020 	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2021 		pm_runtime_status_suspended(dev);
2022 }
2023
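
/*
 * Illustrative sketch (driver name hypothetical): the optimizations checked
 * by dev_pm_skip_suspend() and dev_pm_skip_resume() are advertised by a
 * driver with dev_pm_set_driver_flags(), typically at probe time:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *					     DPM_FLAG_MAY_SKIP_RESUME);
 *		return 0;
 *	}
 */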