1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17 
18 #define pr_fmt(fmt) "PM: " fmt
19 
20 #include <linux/device.h>
21 #include <linux/export.h>
22 #include <linux/mutex.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm-trace.h>
26 #include <linux/pm_wakeirq.h>
27 #include <linux/interrupt.h>
28 #include <linux/sched.h>
29 #include <linux/sched/debug.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37 #include <linux/wakeup_reason.h>
38 
39 #include "../base.h"
40 #include "power.h"
41 
42 typedef int (*pm_callback_t)(struct device *);
43 
44 /*
45  * The entries in the dpm_list list are in a depth first order, simply
46  * because children are guaranteed to be discovered after parents, and
47  * are inserted at the back of the list on discovery.
48  *
49  * Since device_pm_add() may be called with a device lock held,
50  * we must never try to acquire a device lock while holding
51  * dpm_list_mutex.
52  */
53 
54 LIST_HEAD(dpm_list);
55 static LIST_HEAD(dpm_prepared_list);
56 static LIST_HEAD(dpm_suspended_list);
57 static LIST_HEAD(dpm_late_early_list);
58 static LIST_HEAD(dpm_noirq_list);
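
/*
 * During a system transition devices migrate along these lists: dpm_suspend()
 * moves them from dpm_prepared_list to dpm_suspended_list, dpm_suspend_late()
 * to dpm_late_early_list and dpm_noirq_suspend_devices() to dpm_noirq_list;
 * the resume phases walk the lists in the opposite direction and
 * dpm_complete() eventually splices everything back onto dpm_list.
 */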
59 
60 struct suspend_stats suspend_stats;
61 static DEFINE_MUTEX(dpm_list_mtx);
62 static pm_message_t pm_transition;
63 
64 static int async_error;
65 
66 static const char *pm_verb(int event)
67 {
68 	switch (event) {
69 	case PM_EVENT_SUSPEND:
70 		return "suspend";
71 	case PM_EVENT_RESUME:
72 		return "resume";
73 	case PM_EVENT_FREEZE:
74 		return "freeze";
75 	case PM_EVENT_QUIESCE:
76 		return "quiesce";
77 	case PM_EVENT_HIBERNATE:
78 		return "hibernate";
79 	case PM_EVENT_THAW:
80 		return "thaw";
81 	case PM_EVENT_RESTORE:
82 		return "restore";
83 	case PM_EVENT_RECOVER:
84 		return "recover";
85 	default:
86 		return "(unknown PM event)";
87 	}
88 }
89 
90 /**
91  * device_pm_sleep_init - Initialize system suspend-related device fields.
92  * @dev: Device object being initialized.
93  */
94 void device_pm_sleep_init(struct device *dev)
95 {
96 	dev->power.is_prepared = false;
97 	dev->power.is_suspended = false;
98 	dev->power.is_noirq_suspended = false;
99 	dev->power.is_late_suspended = false;
100 	init_completion(&dev->power.completion);
101 	complete_all(&dev->power.completion);
102 	dev->power.wakeup = NULL;
103 	INIT_LIST_HEAD(&dev->power.entry);
104 }
105 
106 /**
107  * device_pm_lock - Lock the list of active devices used by the PM core.
108  */
109 void device_pm_lock(void)
110 {
111 	mutex_lock(&dpm_list_mtx);
112 }
113 
114 /**
115  * device_pm_unlock - Unlock the list of active devices used by the PM core.
116  */
117 void device_pm_unlock(void)
118 {
119 	mutex_unlock(&dpm_list_mtx);
120 }
121 
122 /**
123  * device_pm_add - Add a device to the PM core's list of active devices.
124  * @dev: Device to add to the list.
125  */
126 void device_pm_add(struct device *dev)
127 {
128 	/* Skip PM setup/initialization. */
129 	if (device_pm_not_required(dev))
130 		return;
131 
132 	pr_debug("Adding info for %s:%s\n",
133 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
134 	device_pm_check_callbacks(dev);
135 	mutex_lock(&dpm_list_mtx);
136 	if (dev->parent && dev->parent->power.is_prepared)
137 		dev_warn(dev, "parent %s should not be sleeping\n",
138 			dev_name(dev->parent));
139 	list_add_tail(&dev->power.entry, &dpm_list);
140 	dev->power.in_dpm_list = true;
141 	mutex_unlock(&dpm_list_mtx);
142 }
143 
144 /**
145  * device_pm_remove - Remove a device from the PM core's list of active devices.
146  * @dev: Device to be removed from the list.
147  */
148 void device_pm_remove(struct device *dev)
149 {
150 	if (device_pm_not_required(dev))
151 		return;
152 
153 	pr_debug("Removing info for %s:%s\n",
154 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
155 	complete_all(&dev->power.completion);
156 	mutex_lock(&dpm_list_mtx);
157 	list_del_init(&dev->power.entry);
158 	dev->power.in_dpm_list = false;
159 	mutex_unlock(&dpm_list_mtx);
160 	device_wakeup_disable(dev);
161 	pm_runtime_remove(dev);
162 	device_pm_check_callbacks(dev);
163 }
164 
165 /**
166  * device_pm_move_before - Move device in the PM core's list of active devices.
167  * @deva: Device to move in dpm_list.
168  * @devb: Device @deva should come before.
169  */
170 void device_pm_move_before(struct device *deva, struct device *devb)
171 {
172 	pr_debug("Moving %s:%s before %s:%s\n",
173 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
174 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
175 	/* Delete deva from dpm_list and reinsert before devb. */
176 	list_move_tail(&deva->power.entry, &devb->power.entry);
177 }
178 
179 /**
180  * device_pm_move_after - Move device in the PM core's list of active devices.
181  * @deva: Device to move in dpm_list.
182  * @devb: Device @deva should come after.
183  */
184 void device_pm_move_after(struct device *deva, struct device *devb)
185 {
186 	pr_debug("Moving %s:%s after %s:%s\n",
187 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
188 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
189 	/* Delete deva from dpm_list and reinsert after devb. */
190 	list_move(&deva->power.entry, &devb->power.entry);
191 }
192 
193 /**
194  * device_pm_move_last - Move device to end of the PM core's list of devices.
195  * @dev: Device to move in dpm_list.
196  */
197 void device_pm_move_last(struct device *dev)
198 {
199 	pr_debug("Moving %s:%s to end of list\n",
200 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
201 	list_move_tail(&dev->power.entry, &dpm_list);
202 }
203 
204 static ktime_t initcall_debug_start(struct device *dev, void *cb)
205 {
206 	if (!pm_print_times_enabled)
207 		return 0;
208 
209 	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
210 		 task_pid_nr(current),
211 		 dev->parent ? dev_name(dev->parent) : "none");
212 	return ktime_get();
213 }
214 
215 static void initcall_debug_report(struct device *dev, ktime_t calltime,
216 				  void *cb, int error)
217 {
218 	ktime_t rettime;
219 	s64 nsecs;
220 
221 	if (!pm_print_times_enabled)
222 		return;
223 
224 	rettime = ktime_get();
225 	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
226 
227 	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
228 		 (unsigned long long)nsecs >> 10);
229 }
230 
231 /**
232  * dpm_wait - Wait for a PM operation to complete.
233  * @dev: Device to wait for.
234  * @async: If unset, wait only if the device's power.async_suspend flag is set.
235  */
236 static void dpm_wait(struct device *dev, bool async)
237 {
238 	if (!dev)
239 		return;
240 
241 	if (async || (pm_async_enabled && dev->power.async_suspend))
242 		wait_for_completion(&dev->power.completion);
243 }
244 
245 static int dpm_wait_fn(struct device *dev, void *async_ptr)
246 {
247 	dpm_wait(dev, *((bool *)async_ptr));
248 	return 0;
249 }
250 
251 static void dpm_wait_for_children(struct device *dev, bool async)
252 {
253        device_for_each_child(dev, &async, dpm_wait_fn);
254 }
255 
256 static void dpm_wait_for_suppliers(struct device *dev, bool async)
257 {
258 	struct device_link *link;
259 	int idx;
260 
261 	idx = device_links_read_lock();
262 
263 	/*
264 	 * If the supplier goes away right after we've checked the link to it,
265 	 * we'll wait for its completion to change the state, but that's fine,
266 	 * because the only things that will block as a result are the SRCU
267 	 * callbacks freeing the link objects for the links in the list we're
268 	 * walking.
269 	 */
270 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
271 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
272 			dpm_wait(link->supplier, async);
273 
274 	device_links_read_unlock(idx);
275 }
276 
277 static bool dpm_wait_for_superior(struct device *dev, bool async)
278 {
279 	struct device *parent;
280 
281 	/*
282 	 * If the device is resumed asynchronously and the parent's callback
283 	 * deletes both the device and the parent itself, the parent object may
284 	 * be freed while this function is running, so avoid that by reference
285 	 * counting the parent once more unless the device has been deleted
286 	 * already (in which case return right away).
287 	 */
288 	mutex_lock(&dpm_list_mtx);
289 
290 	if (!device_pm_initialized(dev)) {
291 		mutex_unlock(&dpm_list_mtx);
292 		return false;
293 	}
294 
295 	parent = get_device(dev->parent);
296 
297 	mutex_unlock(&dpm_list_mtx);
298 
299 	dpm_wait(parent, async);
300 	put_device(parent);
301 
302 	dpm_wait_for_suppliers(dev, async);
303 
304 	/*
305 	 * If the parent's callback has deleted the device, attempting to resume
306 	 * it would be invalid, so avoid doing that then.
307 	 */
308 	return device_pm_initialized(dev);
309 }
310 
311 static void dpm_wait_for_consumers(struct device *dev, bool async)
312 {
313 	struct device_link *link;
314 	int idx;
315 
316 	idx = device_links_read_lock();
317 
318 	/*
319 	 * The status of a device link can only be changed from "dormant" by a
320 	 * probe, but that cannot happen during system suspend/resume.  In
321 	 * theory it can change to "dormant" at that time, but then it is
322 	 * reasonable to wait for the target device anyway (e.g. if it goes
323 	 * away, it's better to wait for it to go away completely and then
324 	 * continue instead of trying to continue in parallel with its
325 	 * unregistration).
326 	 */
327 	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
328 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
329 			dpm_wait(link->consumer, async);
330 
331 	device_links_read_unlock(idx);
332 }
333 
334 static void dpm_wait_for_subordinate(struct device *dev, bool async)
335 {
336 	dpm_wait_for_children(dev, async);
337 	dpm_wait_for_consumers(dev, async);
338 }
339 
340 /**
341  * pm_op - Return the PM operation appropriate for given PM event.
342  * @ops: PM operations to choose from.
343  * @state: PM transition of the system being carried out.
344  */
345 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
346 {
347 	switch (state.event) {
348 #ifdef CONFIG_SUSPEND
349 	case PM_EVENT_SUSPEND:
350 		return ops->suspend;
351 	case PM_EVENT_RESUME:
352 		return ops->resume;
353 #endif /* CONFIG_SUSPEND */
354 #ifdef CONFIG_HIBERNATE_CALLBACKS
355 	case PM_EVENT_FREEZE:
356 	case PM_EVENT_QUIESCE:
357 		return ops->freeze;
358 	case PM_EVENT_HIBERNATE:
359 		return ops->poweroff;
360 	case PM_EVENT_THAW:
361 	case PM_EVENT_RECOVER:
362 		return ops->thaw;
363 		break;
364 	case PM_EVENT_RESTORE:
365 		return ops->restore;
366 #endif /* CONFIG_HIBERNATE_CALLBACKS */
367 	}
368 
369 	return NULL;
370 }
371 
372 /**
373  * pm_late_early_op - Return the PM operation appropriate for given PM event.
374  * @ops: PM operations to choose from.
375  * @state: PM transition of the system being carried out.
376  *
377  * Runtime PM is disabled for @dev while this function is being executed.
378  */
379 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
380 				      pm_message_t state)
381 {
382 	switch (state.event) {
383 #ifdef CONFIG_SUSPEND
384 	case PM_EVENT_SUSPEND:
385 		return ops->suspend_late;
386 	case PM_EVENT_RESUME:
387 		return ops->resume_early;
388 #endif /* CONFIG_SUSPEND */
389 #ifdef CONFIG_HIBERNATE_CALLBACKS
390 	case PM_EVENT_FREEZE:
391 	case PM_EVENT_QUIESCE:
392 		return ops->freeze_late;
393 	case PM_EVENT_HIBERNATE:
394 		return ops->poweroff_late;
395 	case PM_EVENT_THAW:
396 	case PM_EVENT_RECOVER:
397 		return ops->thaw_early;
398 	case PM_EVENT_RESTORE:
399 		return ops->restore_early;
400 #endif /* CONFIG_HIBERNATE_CALLBACKS */
401 	}
402 
403 	return NULL;
404 }
405 
406 /**
407  * pm_noirq_op - Return the PM operation appropriate for given PM event.
408  * @ops: PM operations to choose from.
409  * @state: PM transition of the system being carried out.
410  *
411  * The driver of @dev will not receive interrupts while this function is being
412  * executed.
413  */
414 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
415 {
416 	switch (state.event) {
417 #ifdef CONFIG_SUSPEND
418 	case PM_EVENT_SUSPEND:
419 		return ops->suspend_noirq;
420 	case PM_EVENT_RESUME:
421 		return ops->resume_noirq;
422 #endif /* CONFIG_SUSPEND */
423 #ifdef CONFIG_HIBERNATE_CALLBACKS
424 	case PM_EVENT_FREEZE:
425 	case PM_EVENT_QUIESCE:
426 		return ops->freeze_noirq;
427 	case PM_EVENT_HIBERNATE:
428 		return ops->poweroff_noirq;
429 	case PM_EVENT_THAW:
430 	case PM_EVENT_RECOVER:
431 		return ops->thaw_noirq;
432 	case PM_EVENT_RESTORE:
433 		return ops->restore_noirq;
434 #endif /* CONFIG_HIBERNATE_CALLBACKS */
435 	}
436 
437 	return NULL;
438 }
439 
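/*
 * Illustrative sketch (not part of this file): for a hypothetical driver
 * declaring
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend	= foo_suspend,
 *		.resume		= foo_resume,
 *		.suspend_late	= foo_suspend_late,
 *		.resume_early	= foo_resume_early,
 *		.suspend_noirq	= foo_suspend_noirq,
 *		.resume_noirq	= foo_resume_noirq,
 *	};
 *
 * a PM_EVENT_SUSPEND transition makes pm_op() return foo_suspend,
 * pm_late_early_op() return foo_suspend_late and pm_noirq_op() return
 * foo_suspend_noirq, while PM_EVENT_RESUME selects the corresponding
 * resume callbacks.
 */
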
440 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
441 {
442 	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
443 		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
444 		", may wakeup" : "");
445 }
446 
447 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
448 			int error)
449 {
450 	pr_err("Device %s failed to %s%s: error %d\n",
451 	       dev_name(dev), pm_verb(state.event), info, error);
452 }
453 
454 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
455 			  const char *info)
456 {
457 	ktime_t calltime;
458 	u64 usecs64;
459 	int usecs;
460 
461 	calltime = ktime_get();
462 	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
463 	do_div(usecs64, NSEC_PER_USEC);
464 	usecs = usecs64;
465 	if (usecs == 0)
466 		usecs = 1;
467 
468 	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
469 		  info ?: "", info ? " " : "", pm_verb(state.event),
470 		  error ? "aborted" : "complete",
471 		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
472 }
473 
474 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
475 			    pm_message_t state, const char *info)
476 {
477 	ktime_t calltime;
478 	int error;
479 
480 	if (!cb)
481 		return 0;
482 
483 	calltime = initcall_debug_start(dev, cb);
484 
485 	pm_dev_dbg(dev, state, info);
486 	trace_device_pm_callback_start(dev, info, state.event);
487 	error = cb(dev);
488 	trace_device_pm_callback_end(dev, error);
489 	suspend_report_result(cb, error);
490 
491 	initcall_debug_report(dev, calltime, cb, error);
492 
493 	return error;
494 }
495 
496 #ifdef CONFIG_DPM_WATCHDOG
497 struct dpm_watchdog {
498 	struct device		*dev;
499 	struct task_struct	*tsk;
500 	struct timer_list	timer;
501 };
502 
503 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
504 	struct dpm_watchdog wd
505 
506 /**
507  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
508  * @t: The timer that PM watchdog depends on.
509  *
510  * Called when a driver has timed out suspending or resuming.
511  * There's not much we can do here to recover so panic() to
512  * capture a crash-dump in pstore.
513  */
514 static void dpm_watchdog_handler(struct timer_list *t)
515 {
516 	struct dpm_watchdog *wd = from_timer(wd, t, timer);
517 
518 	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
519 	show_stack(wd->tsk, NULL);
520 	panic("%s %s: unrecoverable failure\n",
521 		dev_driver_string(wd->dev), dev_name(wd->dev));
522 }
523 
524 /**
525  * dpm_watchdog_set - Enable pm watchdog for given device.
526  * @wd: Watchdog. Must be allocated on the stack.
527  * @dev: Device to handle.
528  */
529 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
530 {
531 	struct timer_list *timer = &wd->timer;
532 
533 	wd->dev = dev;
534 	wd->tsk = current;
535 
536 	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
537 	/* use same timeout value for both suspend and resume */
538 	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
539 	add_timer(timer);
540 }
541 
542 /**
543  * dpm_watchdog_clear - Disable suspend/resume watchdog.
544  * @wd: Watchdog to disable.
545  */
546 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
547 {
548 	struct timer_list *timer = &wd->timer;
549 
550 	del_timer_sync(timer);
551 	destroy_timer_on_stack(timer);
552 }
553 #else
554 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
555 #define dpm_watchdog_set(x, y)
556 #define dpm_watchdog_clear(x)
557 #endif
558 
559 /*------------------------- Resume routines -------------------------*/
560 
561 /**
562  * suspend_event - Return a "suspend" message for given "resume" one.
563  * @resume_msg: PM message representing a system-wide resume transition.
564  */
565 static pm_message_t suspend_event(pm_message_t resume_msg)
566 {
567 	switch (resume_msg.event) {
568 	case PM_EVENT_RESUME:
569 		return PMSG_SUSPEND;
570 	case PM_EVENT_THAW:
571 	case PM_EVENT_RESTORE:
572 		return PMSG_FREEZE;
573 	case PM_EVENT_RECOVER:
574 		return PMSG_HIBERNATE;
575 	}
576 	return PMSG_ON;
577 }
578 
579 /**
580  * dev_pm_may_skip_resume - System-wide device resume optimization check.
581  * @dev: Target device.
582  *
583  * Checks whether or not the device may be left in suspend after a system-wide
584  * transition to the working state.
585  */
586 bool dev_pm_may_skip_resume(struct device *dev)
587 {
588 	return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
589 }
590 
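/*
 * Note: power.must_resume is computed during the "noirq" suspend phase (see
 * __device_suspend_noirq() below); it is forced on unless the driver has
 * DPM_FLAG_LEAVE_SUSPENDED set and the checks there allow the device to stay
 * suspended, so this test effectively honors that flag during resume
 * transitions other than restore from hibernation.
 */
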
591 static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
592 						pm_message_t state,
593 						const char **info_p)
594 {
595 	pm_callback_t callback;
596 	const char *info;
597 
598 	if (dev->pm_domain) {
599 		info = "noirq power domain ";
600 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
601 	} else if (dev->type && dev->type->pm) {
602 		info = "noirq type ";
603 		callback = pm_noirq_op(dev->type->pm, state);
604 	} else if (dev->class && dev->class->pm) {
605 		info = "noirq class ";
606 		callback = pm_noirq_op(dev->class->pm, state);
607 	} else if (dev->bus && dev->bus->pm) {
608 		info = "noirq bus ";
609 		callback = pm_noirq_op(dev->bus->pm, state);
610 	} else {
611 		return NULL;
612 	}
613 
614 	if (info_p)
615 		*info_p = info;
616 
617 	return callback;
618 }
619 
620 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
621 						 pm_message_t state,
622 						 const char **info_p);
623 
624 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
625 						pm_message_t state,
626 						const char **info_p);
627 
628 /**
629  * device_resume_noirq - Execute a "noirq resume" callback for given device.
630  * @dev: Device to handle.
631  * @state: PM transition of the system being carried out.
632  * @async: If true, the device is being resumed asynchronously.
633  *
634  * The driver of @dev will not receive interrupts while this function is being
635  * executed.
636  */
637 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
638 {
639 	pm_callback_t callback;
640 	const char *info;
641 	bool skip_resume;
642 	int error = 0;
643 
644 	TRACE_DEVICE(dev);
645 	TRACE_RESUME(0);
646 
647 	if (dev->power.syscore || dev->power.direct_complete)
648 		goto Out;
649 
650 	if (!dev->power.is_noirq_suspended)
651 		goto Out;
652 
653 	if (!dpm_wait_for_superior(dev, async))
654 		goto Out;
655 
656 	skip_resume = dev_pm_may_skip_resume(dev);
657 
658 	callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
659 	if (callback)
660 		goto Run;
661 
662 	if (skip_resume)
663 		goto Skip;
664 
665 	if (dev_pm_smart_suspend_and_suspended(dev)) {
666 		pm_message_t suspend_msg = suspend_event(state);
667 
668 		/*
669 		 * If "freeze" callbacks have been skipped during a transition
670 		 * related to hibernation, the subsequent "thaw" callbacks must
671 		 * be skipped too or bad things may happen.  Otherwise, resume
672 		 * callbacks are going to be run for the device, so its runtime
673 		 * PM status must be changed to reflect the new state after the
674 		 * transition under way.
675 		 */
676 		if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
677 		    !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
678 			if (state.event == PM_EVENT_THAW) {
679 				skip_resume = true;
680 				goto Skip;
681 			} else {
682 				pm_runtime_set_active(dev);
683 			}
684 		}
685 	}
686 
687 	if (dev->driver && dev->driver->pm) {
688 		info = "noirq driver ";
689 		callback = pm_noirq_op(dev->driver->pm, state);
690 	}
691 
692 Run:
693 	error = dpm_run_callback(callback, dev, state, info);
694 
695 Skip:
696 	dev->power.is_noirq_suspended = false;
697 
698 	if (skip_resume) {
699 		/* Make the next phases of resume skip the device. */
700 		dev->power.is_late_suspended = false;
701 		dev->power.is_suspended = false;
702 		/*
703 		 * The device is going to be left in suspend, but it might not
704 		 * have been in runtime suspend before the system suspended, so
705 		 * its runtime PM status needs to be updated to avoid confusing
706 		 * the runtime PM framework when runtime PM is enabled for the
707 		 * device again.
708 		 */
709 		pm_runtime_set_suspended(dev);
710 	}
711 
712 Out:
713 	complete_all(&dev->power.completion);
714 	TRACE_RESUME(error);
715 	return error;
716 }
717 
718 static bool is_async(struct device *dev)
719 {
720 	return dev->power.async_suspend && pm_async_enabled
721 		&& !pm_trace_is_enabled();
722 }
723 
724 static bool dpm_async_fn(struct device *dev, async_func_t func)
725 {
726 	reinit_completion(&dev->power.completion);
727 
728 	if (is_async(dev)) {
729 		get_device(dev);
730 		async_schedule_dev(func, dev);
731 		return true;
732 	}
733 
734 	return false;
735 }
736 
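/*
 * Devices with power.async_suspend set are handled in separate async threads
 * scheduled by dpm_async_fn(); ordering with respect to parents, children and
 * device links is still preserved because each device waits on the
 * power.completion of its superiors (resume) or subordinates (suspend) via
 * dpm_wait_for_superior()/dpm_wait_for_subordinate() before running its
 * callbacks.
 */
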
737 static void async_resume_noirq(void *data, async_cookie_t cookie)
738 {
739 	struct device *dev = (struct device *)data;
740 	int error;
741 
742 	error = device_resume_noirq(dev, pm_transition, true);
743 	if (error)
744 		pm_dev_err(dev, pm_transition, " async", error);
745 
746 	put_device(dev);
747 }
748 
749 static void dpm_noirq_resume_devices(pm_message_t state)
750 {
751 	struct device *dev;
752 	ktime_t starttime = ktime_get();
753 
754 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
755 	mutex_lock(&dpm_list_mtx);
756 	pm_transition = state;
757 
758 	/*
759 	 * Advance the async threads upfront,
760 	 * in case the starting of async threads is
761 	 * delayed by non-async resuming devices.
762 	 */
763 	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
764 		dpm_async_fn(dev, async_resume_noirq);
765 
766 	while (!list_empty(&dpm_noirq_list)) {
767 		dev = to_device(dpm_noirq_list.next);
768 		get_device(dev);
769 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
770 		mutex_unlock(&dpm_list_mtx);
771 
772 		if (!is_async(dev)) {
773 			int error;
774 
775 			error = device_resume_noirq(dev, state, false);
776 			if (error) {
777 				suspend_stats.failed_resume_noirq++;
778 				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
779 				dpm_save_failed_dev(dev_name(dev));
780 				pm_dev_err(dev, state, " noirq", error);
781 			}
782 		}
783 
784 		mutex_lock(&dpm_list_mtx);
785 		put_device(dev);
786 	}
787 	mutex_unlock(&dpm_list_mtx);
788 	async_synchronize_full();
789 	dpm_show_time(starttime, state, 0, "noirq");
790 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
791 }
792 
793 /**
794  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
795  * @state: PM transition of the system being carried out.
796  *
797  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
798  * allow device drivers' interrupt handlers to be called.
799  */
800 void dpm_resume_noirq(pm_message_t state)
801 {
802 	dpm_noirq_resume_devices(state);
803 
804 	resume_device_irqs();
805 	device_wakeup_disarm_wake_irqs();
806 
807 	cpuidle_resume();
808 }
809 
810 static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
811 						pm_message_t state,
812 						const char **info_p)
813 {
814 	pm_callback_t callback;
815 	const char *info;
816 
817 	if (dev->pm_domain) {
818 		info = "early power domain ";
819 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
820 	} else if (dev->type && dev->type->pm) {
821 		info = "early type ";
822 		callback = pm_late_early_op(dev->type->pm, state);
823 	} else if (dev->class && dev->class->pm) {
824 		info = "early class ";
825 		callback = pm_late_early_op(dev->class->pm, state);
826 	} else if (dev->bus && dev->bus->pm) {
827 		info = "early bus ";
828 		callback = pm_late_early_op(dev->bus->pm, state);
829 	} else {
830 		return NULL;
831 	}
832 
833 	if (info_p)
834 		*info_p = info;
835 
836 	return callback;
837 }
838 
839 /**
840  * device_resume_early - Execute an "early resume" callback for given device.
841  * @dev: Device to handle.
842  * @state: PM transition of the system being carried out.
843  * @async: If true, the device is being resumed asynchronously.
844  *
845  * Runtime PM is disabled for @dev while this function is being executed.
846  */
847 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
848 {
849 	pm_callback_t callback;
850 	const char *info;
851 	int error = 0;
852 
853 	TRACE_DEVICE(dev);
854 	TRACE_RESUME(0);
855 
856 	if (dev->power.syscore || dev->power.direct_complete)
857 		goto Out;
858 
859 	if (!dev->power.is_late_suspended)
860 		goto Out;
861 
862 	if (!dpm_wait_for_superior(dev, async))
863 		goto Out;
864 
865 	callback = dpm_subsys_resume_early_cb(dev, state, &info);
866 
867 	if (!callback && dev->driver && dev->driver->pm) {
868 		info = "early driver ";
869 		callback = pm_late_early_op(dev->driver->pm, state);
870 	}
871 
872 	error = dpm_run_callback(callback, dev, state, info);
873 	dev->power.is_late_suspended = false;
874 
875  Out:
876 	TRACE_RESUME(error);
877 
878 	pm_runtime_enable(dev);
879 	complete_all(&dev->power.completion);
880 	return error;
881 }
882 
883 static void async_resume_early(void *data, async_cookie_t cookie)
884 {
885 	struct device *dev = (struct device *)data;
886 	int error;
887 
888 	error = device_resume_early(dev, pm_transition, true);
889 	if (error)
890 		pm_dev_err(dev, pm_transition, " async", error);
891 
892 	put_device(dev);
893 }
894 
895 /**
896  * dpm_resume_early - Execute "early resume" callbacks for all devices.
897  * @state: PM transition of the system being carried out.
898  */
899 void dpm_resume_early(pm_message_t state)
900 {
901 	struct device *dev;
902 	ktime_t starttime = ktime_get();
903 
904 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
905 	mutex_lock(&dpm_list_mtx);
906 	pm_transition = state;
907 
908 	/*
909 	 * Advance the async threads upfront,
910 	 * in case the starting of async threads is
911 	 * delayed by non-async resuming devices.
912 	 */
913 	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
914 		dpm_async_fn(dev, async_resume_early);
915 
916 	while (!list_empty(&dpm_late_early_list)) {
917 		dev = to_device(dpm_late_early_list.next);
918 		get_device(dev);
919 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
920 		mutex_unlock(&dpm_list_mtx);
921 
922 		if (!is_async(dev)) {
923 			int error;
924 
925 			error = device_resume_early(dev, state, false);
926 			if (error) {
927 				suspend_stats.failed_resume_early++;
928 				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
929 				dpm_save_failed_dev(dev_name(dev));
930 				pm_dev_err(dev, state, " early", error);
931 			}
932 		}
933 		mutex_lock(&dpm_list_mtx);
934 		put_device(dev);
935 	}
936 	mutex_unlock(&dpm_list_mtx);
937 	async_synchronize_full();
938 	dpm_show_time(starttime, state, 0, "early");
939 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
940 }
941 
942 /**
943  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
944  * @state: PM transition of the system being carried out.
945  */
946 void dpm_resume_start(pm_message_t state)
947 {
948 	dpm_resume_noirq(state);
949 	dpm_resume_early(state);
950 }
951 EXPORT_SYMBOL_GPL(dpm_resume_start);
952 
953 /**
954  * device_resume - Execute "resume" callbacks for given device.
955  * @dev: Device to handle.
956  * @state: PM transition of the system being carried out.
957  * @async: If true, the device is being resumed asynchronously.
958  */
959 static int device_resume(struct device *dev, pm_message_t state, bool async)
960 {
961 	pm_callback_t callback = NULL;
962 	const char *info = NULL;
963 	int error = 0;
964 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
965 
966 	TRACE_DEVICE(dev);
967 	TRACE_RESUME(0);
968 
969 	if (dev->power.syscore)
970 		goto Complete;
971 
972 	if (dev->power.direct_complete) {
973 		/* Match the pm_runtime_disable() in __device_suspend(). */
974 		pm_runtime_enable(dev);
975 		goto Complete;
976 	}
977 
978 	if (!dpm_wait_for_superior(dev, async))
979 		goto Complete;
980 
981 	dpm_watchdog_set(&wd, dev);
982 	device_lock(dev);
983 
984 	/*
985 	 * This is a fib.  But we'll allow new children to be added below
986 	 * a resumed device, even if the device hasn't been completed yet.
987 	 */
988 	dev->power.is_prepared = false;
989 
990 	if (!dev->power.is_suspended)
991 		goto Unlock;
992 
993 	if (dev->pm_domain) {
994 		info = "power domain ";
995 		callback = pm_op(&dev->pm_domain->ops, state);
996 		goto Driver;
997 	}
998 
999 	if (dev->type && dev->type->pm) {
1000 		info = "type ";
1001 		callback = pm_op(dev->type->pm, state);
1002 		goto Driver;
1003 	}
1004 
1005 	if (dev->class && dev->class->pm) {
1006 		info = "class ";
1007 		callback = pm_op(dev->class->pm, state);
1008 		goto Driver;
1009 	}
1010 
1011 	if (dev->bus) {
1012 		if (dev->bus->pm) {
1013 			info = "bus ";
1014 			callback = pm_op(dev->bus->pm, state);
1015 		} else if (dev->bus->resume) {
1016 			info = "legacy bus ";
1017 			callback = dev->bus->resume;
1018 			goto End;
1019 		}
1020 	}
1021 
1022  Driver:
1023 	if (!callback && dev->driver && dev->driver->pm) {
1024 		info = "driver ";
1025 		callback = pm_op(dev->driver->pm, state);
1026 	}
1027 
1028  End:
1029 	error = dpm_run_callback(callback, dev, state, info);
1030 	dev->power.is_suspended = false;
1031 
1032  Unlock:
1033 	device_unlock(dev);
1034 	dpm_watchdog_clear(&wd);
1035 
1036  Complete:
1037 	complete_all(&dev->power.completion);
1038 
1039 	TRACE_RESUME(error);
1040 
1041 	return error;
1042 }
1043 
1044 static void async_resume(void *data, async_cookie_t cookie)
1045 {
1046 	struct device *dev = (struct device *)data;
1047 	int error;
1048 
1049 	error = device_resume(dev, pm_transition, true);
1050 	if (error)
1051 		pm_dev_err(dev, pm_transition, " async", error);
1052 	put_device(dev);
1053 }
1054 
1055 /**
1056  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1057  * @state: PM transition of the system being carried out.
1058  *
1059  * Execute the appropriate "resume" callback for all devices whose status
1060  * indicates that they are suspended.
1061  */
1062 void dpm_resume(pm_message_t state)
1063 {
1064 	struct device *dev;
1065 	ktime_t starttime = ktime_get();
1066 
1067 	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1068 	might_sleep();
1069 
1070 	mutex_lock(&dpm_list_mtx);
1071 	pm_transition = state;
1072 	async_error = 0;
1073 
1074 	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1075 		dpm_async_fn(dev, async_resume);
1076 
1077 	while (!list_empty(&dpm_suspended_list)) {
1078 		dev = to_device(dpm_suspended_list.next);
1079 		get_device(dev);
1080 		if (!is_async(dev)) {
1081 			int error;
1082 
1083 			mutex_unlock(&dpm_list_mtx);
1084 
1085 			error = device_resume(dev, state, false);
1086 			if (error) {
1087 				suspend_stats.failed_resume++;
1088 				dpm_save_failed_step(SUSPEND_RESUME);
1089 				dpm_save_failed_dev(dev_name(dev));
1090 				pm_dev_err(dev, state, "", error);
1091 			}
1092 
1093 			mutex_lock(&dpm_list_mtx);
1094 		}
1095 		if (!list_empty(&dev->power.entry))
1096 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1097 		put_device(dev);
1098 	}
1099 	mutex_unlock(&dpm_list_mtx);
1100 	async_synchronize_full();
1101 	dpm_show_time(starttime, state, 0, NULL);
1102 
1103 	cpufreq_resume();
1104 	devfreq_resume();
1105 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1106 }
1107 
1108 /**
1109  * device_complete - Complete a PM transition for given device.
1110  * @dev: Device to handle.
1111  * @state: PM transition of the system being carried out.
1112  */
1113 static void device_complete(struct device *dev, pm_message_t state)
1114 {
1115 	void (*callback)(struct device *) = NULL;
1116 	const char *info = NULL;
1117 
1118 	if (dev->power.syscore)
1119 		return;
1120 
1121 	device_lock(dev);
1122 
1123 	if (dev->pm_domain) {
1124 		info = "completing power domain ";
1125 		callback = dev->pm_domain->ops.complete;
1126 	} else if (dev->type && dev->type->pm) {
1127 		info = "completing type ";
1128 		callback = dev->type->pm->complete;
1129 	} else if (dev->class && dev->class->pm) {
1130 		info = "completing class ";
1131 		callback = dev->class->pm->complete;
1132 	} else if (dev->bus && dev->bus->pm) {
1133 		info = "completing bus ";
1134 		callback = dev->bus->pm->complete;
1135 	}
1136 
1137 	if (!callback && dev->driver && dev->driver->pm) {
1138 		info = "completing driver ";
1139 		callback = dev->driver->pm->complete;
1140 	}
1141 
1142 	if (callback) {
1143 		pm_dev_dbg(dev, state, info);
1144 		callback(dev);
1145 	}
1146 
1147 	device_unlock(dev);
1148 
1149 	pm_runtime_put(dev);
1150 }
1151 
1152 /**
1153  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1154  * @state: PM transition of the system being carried out.
1155  *
1156  * Execute the ->complete() callbacks for all devices whose PM status is not
1157  * DPM_ON (this allows new devices to be registered).
1158  */
1159 void dpm_complete(pm_message_t state)
1160 {
1161 	struct list_head list;
1162 
1163 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1164 	might_sleep();
1165 
1166 	INIT_LIST_HEAD(&list);
1167 	mutex_lock(&dpm_list_mtx);
1168 	while (!list_empty(&dpm_prepared_list)) {
1169 		struct device *dev = to_device(dpm_prepared_list.prev);
1170 
1171 		get_device(dev);
1172 		dev->power.is_prepared = false;
1173 		list_move(&dev->power.entry, &list);
1174 		mutex_unlock(&dpm_list_mtx);
1175 
1176 		trace_device_pm_callback_start(dev, "", state.event);
1177 		device_complete(dev, state);
1178 		trace_device_pm_callback_end(dev, 0);
1179 
1180 		mutex_lock(&dpm_list_mtx);
1181 		put_device(dev);
1182 	}
1183 	list_splice(&list, &dpm_list);
1184 	mutex_unlock(&dpm_list_mtx);
1185 
1186 	/* Allow device probing and trigger re-probing of deferred devices */
1187 	device_unblock_probing();
1188 	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1189 }
1190 
1191 /**
1192  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1193  * @state: PM transition of the system being carried out.
1194  *
1195  * Execute "resume" callbacks for all devices and complete the PM transition of
1196  * the system.
1197  */
1198 void dpm_resume_end(pm_message_t state)
1199 {
1200 	dpm_resume(state);
1201 	dpm_complete(state);
1202 }
1203 EXPORT_SYMBOL_GPL(dpm_resume_end);
1204 
1205 
1206 /*------------------------- Suspend routines -------------------------*/
1207 
1208 /**
1209  * resume_event - Return a "resume" message for given "suspend" sleep state.
1210  * @sleep_state: PM message representing a sleep state.
1211  *
1212  * Return a PM message representing the resume event corresponding to given
1213  * sleep state.
1214  */
1215 static pm_message_t resume_event(pm_message_t sleep_state)
1216 {
1217 	switch (sleep_state.event) {
1218 	case PM_EVENT_SUSPEND:
1219 		return PMSG_RESUME;
1220 	case PM_EVENT_FREEZE:
1221 	case PM_EVENT_QUIESCE:
1222 		return PMSG_RECOVER;
1223 	case PM_EVENT_HIBERNATE:
1224 		return PMSG_RESTORE;
1225 	}
1226 	return PMSG_ON;
1227 }
1228 
1229 static void dpm_superior_set_must_resume(struct device *dev)
1230 {
1231 	struct device_link *link;
1232 	int idx;
1233 
1234 	if (dev->parent)
1235 		dev->parent->power.must_resume = true;
1236 
1237 	idx = device_links_read_lock();
1238 
1239 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1240 		link->supplier->power.must_resume = true;
1241 
1242 	device_links_read_unlock(idx);
1243 }
1244 
1245 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
1246 						 pm_message_t state,
1247 						 const char **info_p)
1248 {
1249 	pm_callback_t callback;
1250 	const char *info;
1251 
1252 	if (dev->pm_domain) {
1253 		info = "noirq power domain ";
1254 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1255 	} else if (dev->type && dev->type->pm) {
1256 		info = "noirq type ";
1257 		callback = pm_noirq_op(dev->type->pm, state);
1258 	} else if (dev->class && dev->class->pm) {
1259 		info = "noirq class ";
1260 		callback = pm_noirq_op(dev->class->pm, state);
1261 	} else if (dev->bus && dev->bus->pm) {
1262 		info = "noirq bus ";
1263 		callback = pm_noirq_op(dev->bus->pm, state);
1264 	} else {
1265 		return NULL;
1266 	}
1267 
1268 	if (info_p)
1269 		*info_p = info;
1270 
1271 	return callback;
1272 }
1273 
1274 static bool device_must_resume(struct device *dev, pm_message_t state,
1275 			       bool no_subsys_suspend_noirq)
1276 {
1277 	pm_message_t resume_msg = resume_event(state);
1278 
1279 	/*
1280 	 * If all of the device driver's "noirq", "late" and "early" callbacks
1281 	 * are invoked directly by the core, the decision to allow the device to
1282 	 * stay in suspend can be based on its current runtime PM status and its
1283 	 * wakeup settings.
1284 	 */
1285 	if (no_subsys_suspend_noirq &&
1286 	    !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
1287 	    !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
1288 	    !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
1289 		return !pm_runtime_status_suspended(dev) &&
1290 			(resume_msg.event != PM_EVENT_RESUME ||
1291 			 (device_can_wakeup(dev) && !device_may_wakeup(dev)));
1292 
1293 	/*
1294 	 * The only safe strategy here is to require that if the device may not
1295 	 * be left in suspend, resume callbacks must be invoked for it.
1296 	 */
1297 	return !dev->power.may_skip_resume;
1298 }
1299 
1300 /**
1301  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1302  * @dev: Device to handle.
1303  * @state: PM transition of the system being carried out.
1304  * @async: If true, the device is being suspended asynchronously.
1305  *
1306  * The driver of @dev will not receive interrupts while this function is being
1307  * executed.
1308  */
1309 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1310 {
1311 	pm_callback_t callback;
1312 	const char *info;
1313 	bool no_subsys_cb = false;
1314 	int error = 0;
1315 
1316 	TRACE_DEVICE(dev);
1317 	TRACE_SUSPEND(0);
1318 
1319 	dpm_wait_for_subordinate(dev, async);
1320 
1321 	if (async_error)
1322 		goto Complete;
1323 
1324 	if (dev->power.syscore || dev->power.direct_complete)
1325 		goto Complete;
1326 
1327 	callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
1328 	if (callback)
1329 		goto Run;
1330 
1331 	no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
1332 
1333 	if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
1334 		goto Skip;
1335 
1336 	if (dev->driver && dev->driver->pm) {
1337 		info = "noirq driver ";
1338 		callback = pm_noirq_op(dev->driver->pm, state);
1339 	}
1340 
1341 Run:
1342 	error = dpm_run_callback(callback, dev, state, info);
1343 	if (error) {
1344 		async_error = error;
1345 		log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
1346 					 dev_name(dev), callback, error);
1347 		goto Complete;
1348 	}
1349 
1350 Skip:
1351 	dev->power.is_noirq_suspended = true;
1352 
1353 	if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
1354 		dev->power.must_resume = dev->power.must_resume ||
1355 				atomic_read(&dev->power.usage_count) > 1 ||
1356 				device_must_resume(dev, state, no_subsys_cb);
1357 	} else {
1358 		dev->power.must_resume = true;
1359 	}
1360 
1361 	if (dev->power.must_resume)
1362 		dpm_superior_set_must_resume(dev);
1363 
1364 Complete:
1365 	complete_all(&dev->power.completion);
1366 	TRACE_SUSPEND(error);
1367 	return error;
1368 }
1369 
1370 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1371 {
1372 	struct device *dev = (struct device *)data;
1373 	int error;
1374 
1375 	error = __device_suspend_noirq(dev, pm_transition, true);
1376 	if (error) {
1377 		dpm_save_failed_dev(dev_name(dev));
1378 		pm_dev_err(dev, pm_transition, " async", error);
1379 	}
1380 
1381 	put_device(dev);
1382 }
1383 
1384 static int device_suspend_noirq(struct device *dev)
1385 {
1386 	if (dpm_async_fn(dev, async_suspend_noirq))
1387 		return 0;
1388 
1389 	return __device_suspend_noirq(dev, pm_transition, false);
1390 }
1391 
1392 static int dpm_noirq_suspend_devices(pm_message_t state)
1393 {
1394 	ktime_t starttime = ktime_get();
1395 	int error = 0;
1396 
1397 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1398 	mutex_lock(&dpm_list_mtx);
1399 	pm_transition = state;
1400 	async_error = 0;
1401 
1402 	while (!list_empty(&dpm_late_early_list)) {
1403 		struct device *dev = to_device(dpm_late_early_list.prev);
1404 
1405 		get_device(dev);
1406 		mutex_unlock(&dpm_list_mtx);
1407 
1408 		error = device_suspend_noirq(dev);
1409 
1410 		mutex_lock(&dpm_list_mtx);
1411 		if (error) {
1412 			pm_dev_err(dev, state, " noirq", error);
1413 			dpm_save_failed_dev(dev_name(dev));
1414 			put_device(dev);
1415 			break;
1416 		}
1417 		if (!list_empty(&dev->power.entry))
1418 			list_move(&dev->power.entry, &dpm_noirq_list);
1419 		put_device(dev);
1420 
1421 		if (async_error)
1422 			break;
1423 	}
1424 	mutex_unlock(&dpm_list_mtx);
1425 	async_synchronize_full();
1426 	if (!error)
1427 		error = async_error;
1428 
1429 	if (error) {
1430 		suspend_stats.failed_suspend_noirq++;
1431 		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1432 	}
1433 	dpm_show_time(starttime, state, error, "noirq");
1434 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1435 	return error;
1436 }
1437 
1438 /**
1439  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1440  * @state: PM transition of the system being carried out.
1441  *
1442  * Prevent device drivers' interrupt handlers from being called and invoke
1443  * "noirq" suspend callbacks for all non-sysdev devices.
1444  */
1445 int dpm_suspend_noirq(pm_message_t state)
1446 {
1447 	int ret;
1448 
1449 	cpuidle_pause();
1450 
1451 	device_wakeup_arm_wake_irqs();
1452 	suspend_device_irqs();
1453 
1454 	ret = dpm_noirq_suspend_devices(state);
1455 	if (ret)
1456 		dpm_resume_noirq(resume_event(state));
1457 
1458 	return ret;
1459 }
1460 
1461 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1462 {
1463 	struct device *parent = dev->parent;
1464 
1465 	if (!parent)
1466 		return;
1467 
1468 	spin_lock_irq(&parent->power.lock);
1469 
1470 	if (dev->power.wakeup_path && !parent->power.ignore_children)
1471 		parent->power.wakeup_path = true;
1472 
1473 	spin_unlock_irq(&parent->power.lock);
1474 }
1475 
1476 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
1477 						pm_message_t state,
1478 						const char **info_p)
1479 {
1480 	pm_callback_t callback;
1481 	const char *info;
1482 
1483 	if (dev->pm_domain) {
1484 		info = "late power domain ";
1485 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1486 	} else if (dev->type && dev->type->pm) {
1487 		info = "late type ";
1488 		callback = pm_late_early_op(dev->type->pm, state);
1489 	} else if (dev->class && dev->class->pm) {
1490 		info = "late class ";
1491 		callback = pm_late_early_op(dev->class->pm, state);
1492 	} else if (dev->bus && dev->bus->pm) {
1493 		info = "late bus ";
1494 		callback = pm_late_early_op(dev->bus->pm, state);
1495 	} else {
1496 		return NULL;
1497 	}
1498 
1499 	if (info_p)
1500 		*info_p = info;
1501 
1502 	return callback;
1503 }
1504 
1505 /**
1506  * __device_suspend_late - Execute a "late suspend" callback for given device.
1507  * @dev: Device to handle.
1508  * @state: PM transition of the system being carried out.
1509  * @async: If true, the device is being suspended asynchronously.
1510  *
1511  * Runtime PM is disabled for @dev while this function is being executed.
1512  */
1513 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1514 {
1515 	pm_callback_t callback;
1516 	const char *info;
1517 	int error = 0;
1518 
1519 	TRACE_DEVICE(dev);
1520 	TRACE_SUSPEND(0);
1521 
1522 	__pm_runtime_disable(dev, false);
1523 
1524 	dpm_wait_for_subordinate(dev, async);
1525 
1526 	if (async_error)
1527 		goto Complete;
1528 
1529 	if (pm_wakeup_pending()) {
1530 		async_error = -EBUSY;
1531 		goto Complete;
1532 	}
1533 
1534 	if (dev->power.syscore || dev->power.direct_complete)
1535 		goto Complete;
1536 
1537 	callback = dpm_subsys_suspend_late_cb(dev, state, &info);
1538 	if (callback)
1539 		goto Run;
1540 
1541 	if (dev_pm_smart_suspend_and_suspended(dev) &&
1542 	    !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
1543 		goto Skip;
1544 
1545 	if (dev->driver && dev->driver->pm) {
1546 		info = "late driver ";
1547 		callback = pm_late_early_op(dev->driver->pm, state);
1548 	}
1549 
1550 Run:
1551 	error = dpm_run_callback(callback, dev, state, info);
1552 	if (error) {
1553 		async_error = error;
1554 		log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
1555 					 dev_name(dev), callback, error);
1556 		goto Complete;
1557 	}
1558 	dpm_propagate_wakeup_to_parent(dev);
1559 
1560 Skip:
1561 	dev->power.is_late_suspended = true;
1562 
1563 Complete:
1564 	TRACE_SUSPEND(error);
1565 	complete_all(&dev->power.completion);
1566 	return error;
1567 }
1568 
1569 static void async_suspend_late(void *data, async_cookie_t cookie)
1570 {
1571 	struct device *dev = (struct device *)data;
1572 	int error;
1573 
1574 	error = __device_suspend_late(dev, pm_transition, true);
1575 	if (error) {
1576 		dpm_save_failed_dev(dev_name(dev));
1577 		pm_dev_err(dev, pm_transition, " async", error);
1578 	}
1579 	put_device(dev);
1580 }
1581 
1582 static int device_suspend_late(struct device *dev)
1583 {
1584 	if (dpm_async_fn(dev, async_suspend_late))
1585 		return 0;
1586 
1587 	return __device_suspend_late(dev, pm_transition, false);
1588 }
1589 
1590 /**
1591  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1592  * @state: PM transition of the system being carried out.
1593  */
1594 int dpm_suspend_late(pm_message_t state)
1595 {
1596 	ktime_t starttime = ktime_get();
1597 	int error = 0;
1598 
1599 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1600 	mutex_lock(&dpm_list_mtx);
1601 	pm_transition = state;
1602 	async_error = 0;
1603 
1604 	while (!list_empty(&dpm_suspended_list)) {
1605 		struct device *dev = to_device(dpm_suspended_list.prev);
1606 
1607 		get_device(dev);
1608 		mutex_unlock(&dpm_list_mtx);
1609 
1610 		error = device_suspend_late(dev);
1611 
1612 		mutex_lock(&dpm_list_mtx);
1613 		if (!list_empty(&dev->power.entry))
1614 			list_move(&dev->power.entry, &dpm_late_early_list);
1615 
1616 		if (error) {
1617 			pm_dev_err(dev, state, " late", error);
1618 			dpm_save_failed_dev(dev_name(dev));
1619 			put_device(dev);
1620 			break;
1621 		}
1622 		put_device(dev);
1623 
1624 		if (async_error)
1625 			break;
1626 	}
1627 	mutex_unlock(&dpm_list_mtx);
1628 	async_synchronize_full();
1629 	if (!error)
1630 		error = async_error;
1631 	if (error) {
1632 		suspend_stats.failed_suspend_late++;
1633 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1634 		dpm_resume_early(resume_event(state));
1635 	}
1636 	dpm_show_time(starttime, state, error, "late");
1637 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1638 	return error;
1639 }
1640 
1641 /**
1642  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1643  * @state: PM transition of the system being carried out.
1644  */
1645 int dpm_suspend_end(pm_message_t state)
1646 {
1647 	ktime_t starttime = ktime_get();
1648 	int error;
1649 
1650 	error = dpm_suspend_late(state);
1651 	if (error)
1652 		goto out;
1653 
1654 	error = dpm_suspend_noirq(state);
1655 	if (error)
1656 		dpm_resume_early(resume_event(state));
1657 
1658 out:
1659 	dpm_show_time(starttime, state, error, "end");
1660 	return error;
1661 }
1662 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1663 
1664 /**
1665  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1666  * @dev: Device to suspend.
1667  * @state: PM transition of the system being carried out.
1668  * @cb: Suspend callback to execute.
1669  * @info: string description of caller.
1670  */
1671 static int legacy_suspend(struct device *dev, pm_message_t state,
1672 			  int (*cb)(struct device *dev, pm_message_t state),
1673 			  const char *info)
1674 {
1675 	int error;
1676 	ktime_t calltime;
1677 
1678 	calltime = initcall_debug_start(dev, cb);
1679 
1680 	trace_device_pm_callback_start(dev, info, state.event);
1681 	error = cb(dev, state);
1682 	trace_device_pm_callback_end(dev, error);
1683 	suspend_report_result(cb, error);
1684 
1685 	initcall_debug_report(dev, calltime, cb, error);
1686 
1687 	return error;
1688 }
1689 
1690 static void dpm_clear_superiors_direct_complete(struct device *dev)
1691 {
1692 	struct device_link *link;
1693 	int idx;
1694 
1695 	if (dev->parent) {
1696 		spin_lock_irq(&dev->parent->power.lock);
1697 		dev->parent->power.direct_complete = false;
1698 		spin_unlock_irq(&dev->parent->power.lock);
1699 	}
1700 
1701 	idx = device_links_read_lock();
1702 
1703 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
1704 		spin_lock_irq(&link->supplier->power.lock);
1705 		link->supplier->power.direct_complete = false;
1706 		spin_unlock_irq(&link->supplier->power.lock);
1707 	}
1708 
1709 	device_links_read_unlock(idx);
1710 }
1711 
1712 /**
1713  * __device_suspend - Execute "suspend" callbacks for given device.
1714  * @dev: Device to handle.
1715  * @state: PM transition of the system being carried out.
1716  * @async: If true, the device is being suspended asynchronously.
1717  */
1718 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1719 {
1720 	pm_callback_t callback = NULL;
1721 	const char *info = NULL;
1722 	int error = 0;
1723 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1724 
1725 	TRACE_DEVICE(dev);
1726 	TRACE_SUSPEND(0);
1727 
1728 	dpm_wait_for_subordinate(dev, async);
1729 
1730 	if (async_error) {
1731 		dev->power.direct_complete = false;
1732 		goto Complete;
1733 	}
1734 
1735 	/*
1736 	 * Wait for possible runtime PM transitions of the device in progress
1737 	 * to complete and if there's a runtime resume request pending for it,
1738 	 * resume it before proceeding with invoking the system-wide suspend
1739 	 * callbacks for it.
1740 	 *
1741 	 * If the system-wide suspend callbacks below change the configuration
1742 	 * of the device, they must disable runtime PM for it or otherwise
1743 	 * ensure that its runtime-resume callbacks will not be confused by that
1744 	 * change in case they are invoked going forward.
1745 	 */
1746 	pm_runtime_barrier(dev);
1747 
1748 	if (pm_wakeup_pending()) {
1749 		dev->power.direct_complete = false;
1750 		async_error = -EBUSY;
1751 		goto Complete;
1752 	}
1753 
1754 	if (dev->power.syscore)
1755 		goto Complete;
1756 
1757 	/* Avoid direct_complete to let wakeup_path propagate. */
1758 	if (device_may_wakeup(dev) || dev->power.wakeup_path)
1759 		dev->power.direct_complete = false;
1760 
1761 	if (dev->power.direct_complete) {
1762 		if (pm_runtime_status_suspended(dev)) {
1763 			pm_runtime_disable(dev);
1764 			if (pm_runtime_status_suspended(dev)) {
1765 				pm_dev_dbg(dev, state, "direct-complete ");
1766 				goto Complete;
1767 			}
1768 
1769 			pm_runtime_enable(dev);
1770 		}
1771 		dev->power.direct_complete = false;
1772 	}
1773 
1774 	dev->power.may_skip_resume = false;
1775 	dev->power.must_resume = false;
1776 
1777 	dpm_watchdog_set(&wd, dev);
1778 	device_lock(dev);
1779 
1780 	if (dev->pm_domain) {
1781 		info = "power domain ";
1782 		callback = pm_op(&dev->pm_domain->ops, state);
1783 		goto Run;
1784 	}
1785 
1786 	if (dev->type && dev->type->pm) {
1787 		info = "type ";
1788 		callback = pm_op(dev->type->pm, state);
1789 		goto Run;
1790 	}
1791 
1792 	if (dev->class && dev->class->pm) {
1793 		info = "class ";
1794 		callback = pm_op(dev->class->pm, state);
1795 		goto Run;
1796 	}
1797 
1798 	if (dev->bus) {
1799 		if (dev->bus->pm) {
1800 			info = "bus ";
1801 			callback = pm_op(dev->bus->pm, state);
1802 		} else if (dev->bus->suspend) {
1803 			pm_dev_dbg(dev, state, "legacy bus ");
1804 			error = legacy_suspend(dev, state, dev->bus->suspend,
1805 						"legacy bus ");
1806 			goto End;
1807 		}
1808 	}
1809 
1810  Run:
1811 	if (!callback && dev->driver && dev->driver->pm) {
1812 		info = "driver ";
1813 		callback = pm_op(dev->driver->pm, state);
1814 	}
1815 
1816 	error = dpm_run_callback(callback, dev, state, info);
1817 
1818  End:
1819 	if (!error) {
1820 		dev->power.is_suspended = true;
1821 		if (device_may_wakeup(dev))
1822 			dev->power.wakeup_path = true;
1823 
1824 		dpm_propagate_wakeup_to_parent(dev);
1825 		dpm_clear_superiors_direct_complete(dev);
1826 	} else {
1827 		log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
1828 					 dev_name(dev), callback, error);
1829 	}
1830 
1831 	device_unlock(dev);
1832 	dpm_watchdog_clear(&wd);
1833 
1834  Complete:
1835 	if (error)
1836 		async_error = error;
1837 
1838 	complete_all(&dev->power.completion);
1839 	TRACE_SUSPEND(error);
1840 	return error;
1841 }
1842 
1843 static void async_suspend(void *data, async_cookie_t cookie)
1844 {
1845 	struct device *dev = (struct device *)data;
1846 	int error;
1847 
1848 	error = __device_suspend(dev, pm_transition, true);
1849 	if (error) {
1850 		dpm_save_failed_dev(dev_name(dev));
1851 		pm_dev_err(dev, pm_transition, " async", error);
1852 	}
1853 
1854 	put_device(dev);
1855 }
1856 
1857 static int device_suspend(struct device *dev)
1858 {
1859 	if (dpm_async_fn(dev, async_suspend))
1860 		return 0;
1861 
1862 	return __device_suspend(dev, pm_transition, false);
1863 }
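/*
 * device_suspend() queues asynchronous handling through dpm_async_fn()
 * when the device is marked for async suspend; otherwise it falls back to
 * calling __device_suspend() synchronously from the dpm_suspend() thread.
 */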
1864 
1865 /**
1866  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1867  * @state: PM transition of the system being carried out.
1868  */
1869 int dpm_suspend(pm_message_t state)
1870 {
1871 	ktime_t starttime = ktime_get();
1872 	int error = 0;
1873 
1874 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1875 	might_sleep();
1876 
1877 	devfreq_suspend();
1878 	cpufreq_suspend();
1879 
1880 	mutex_lock(&dpm_list_mtx);
1881 	pm_transition = state;
1882 	async_error = 0;
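	/*
	 * Take devices from the tail of dpm_prepared_list, so they are
	 * suspended in the reverse order of their registration.
	 */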
1883 	while (!list_empty(&dpm_prepared_list)) {
1884 		struct device *dev = to_device(dpm_prepared_list.prev);
1885 
1886 		get_device(dev);
1887 		mutex_unlock(&dpm_list_mtx);
1888 
1889 		error = device_suspend(dev);
1890 
1891 		mutex_lock(&dpm_list_mtx);
1892 		if (error) {
1893 			pm_dev_err(dev, state, "", error);
1894 			dpm_save_failed_dev(dev_name(dev));
1895 			put_device(dev);
1896 			break;
1897 		}
1898 		if (!list_empty(&dev->power.entry))
1899 			list_move(&dev->power.entry, &dpm_suspended_list);
1900 		put_device(dev);
1901 		if (async_error)
1902 			break;
1903 	}
1904 	mutex_unlock(&dpm_list_mtx);
1905 	async_synchronize_full();
1906 	if (!error)
1907 		error = async_error;
1908 	if (error) {
1909 		suspend_stats.failed_suspend++;
1910 		dpm_save_failed_step(SUSPEND_SUSPEND);
1911 	}
1912 	dpm_show_time(starttime, state, error, NULL);
1913 	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1914 	return error;
1915 }
1916 
1917 /**
1918  * device_prepare - Prepare a device for system power transition.
1919  * @dev: Device to handle.
1920  * @state: PM transition of the system being carried out.
1921  *
1922  * Execute the ->prepare() callback(s) for given device.  No new children of the
1923  * device may be registered after this function has returned.
1924  */
1925 static int device_prepare(struct device *dev, pm_message_t state)
1926 {
1927 	int (*callback)(struct device *) = NULL;
1928 	int ret = 0;
1929 
1930 	if (dev->power.syscore)
1931 		return 0;
1932 
1933 	WARN_ON(!pm_runtime_enabled(dev) &&
1934 		dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
1935 					      DPM_FLAG_LEAVE_SUSPENDED));
1936 
1937 	/*
1938 	 * If a device's parent goes into runtime suspend at the wrong time,
1939 	 * it won't be possible to resume the device.  To prevent this we
1940 	 * block runtime suspend here, during the prepare phase, and allow
1941 	 * it again during the complete phase.
1942 	 */
1943 	pm_runtime_get_noresume(dev);
1944 
1945 	device_lock(dev);
1946 
1947 	dev->power.wakeup_path = false;
1948 
1949 	if (dev->power.no_pm_callbacks)
1950 		goto unlock;
1951 
1952 	if (dev->pm_domain)
1953 		callback = dev->pm_domain->ops.prepare;
1954 	else if (dev->type && dev->type->pm)
1955 		callback = dev->type->pm->prepare;
1956 	else if (dev->class && dev->class->pm)
1957 		callback = dev->class->pm->prepare;
1958 	else if (dev->bus && dev->bus->pm)
1959 		callback = dev->bus->pm->prepare;
1960 
1961 	if (!callback && dev->driver && dev->driver->pm)
1962 		callback = dev->driver->pm->prepare;
1963 
1964 	if (callback)
1965 		ret = callback(dev);
1966 
1967 unlock:
1968 	device_unlock(dev);
1969 
1970 	if (ret < 0) {
1971 		suspend_report_result(callback, ret);
1972 		pm_runtime_put(dev);
1973 		return ret;
1974 	}
1975 	/*
1976 	 * A positive return value from ->prepare() means "this device appears
1977 	 * to be runtime-suspended and its state is fine, so if it really is
1978 	 * runtime-suspended, you can leave it in that state provided that you
1979 	 * will do the same thing with all of its descendants".  This only
1980 	 * applies to suspend transitions, however.
1981 	 */
1982 	spin_lock_irq(&dev->power.lock);
1983 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1984 		((pm_runtime_suspended(dev) && ret > 0) ||
1985 		 dev->power.no_pm_callbacks) &&
1986 		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
1987 	spin_unlock_irq(&dev->power.lock);
1988 	return 0;
1989 }
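/*
 * A minimal sketch (not part of this file) of a driver opting into the
 * direct-complete path handled in device_prepare() above. The "foo" names
 * are hypothetical and used only for illustration; returning a positive
 * value from ->prepare() tells the PM core the device looks
 * runtime-suspended and may be left in that state:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev) ? 1 : 0;
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.prepare = foo_prepare,
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 */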
1990 
1991 /**
1992  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1993  * @state: PM transition of the system being carried out.
1994  *
1995  * Execute the ->prepare() callback(s) for all devices.
1996  */
1997 int dpm_prepare(pm_message_t state)
1998 {
1999 	int error = 0;
2000 
2001 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
2002 	might_sleep();
2003 
2004 	/*
2005 	 * Give the known devices a chance to complete their probes before we
2006 	 * disable probing of devices. This sync point is important at least
2007 	 * at boot time and during hibernation restore.
2008 	 */
2009 	wait_for_device_probe();
2010 	/*
2011 	 * Probing devices during suspend or hibernation is unsafe and would
2012 	 * make system behavior unpredictable, so prohibit device probing
2013 	 * here and defer the probes instead. The normal behavior will be
2014 	 * restored in dpm_complete().
2015 	 */
2016 	device_block_probing();
2017 
2018 	mutex_lock(&dpm_list_mtx);
2019 	while (!list_empty(&dpm_list)) {
2020 		struct device *dev = to_device(dpm_list.next);
2021 
2022 		get_device(dev);
2023 		mutex_unlock(&dpm_list_mtx);
2024 
2025 		trace_device_pm_callback_start(dev, "", state.event);
2026 		error = device_prepare(dev, state);
2027 		trace_device_pm_callback_end(dev, error);
2028 
2029 		mutex_lock(&dpm_list_mtx);
2030 		if (error) {
2031 			if (error == -EAGAIN) {
2032 				put_device(dev);
2033 				error = 0;
2034 				continue;
2035 			}
2036 			pr_info("Device %s not prepared for power transition: code %d\n",
2037 				dev_name(dev), error);
2038 			log_suspend_abort_reason("Device %s not prepared for power transition: code %d",
2039 						 dev_name(dev), error);
2040 			dpm_save_failed_dev(dev_name(dev));
2041 			put_device(dev);
2042 			break;
2043 		}
2044 		dev->power.is_prepared = true;
2045 		if (!list_empty(&dev->power.entry))
2046 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
2047 		put_device(dev);
2048 	}
2049 	mutex_unlock(&dpm_list_mtx);
2050 	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2051 	return error;
2052 }
2053 
2054 /**
2055  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2056  * @state: PM transition of the system being carried out.
2057  *
2058  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2059  * callbacks for them.
2060  */
2061 int dpm_suspend_start(pm_message_t state)
2062 {
2063 	ktime_t starttime = ktime_get();
2064 	int error;
2065 
2066 	error = dpm_prepare(state);
2067 	if (error) {
2068 		suspend_stats.failed_prepare++;
2069 		dpm_save_failed_step(SUSPEND_PREPARE);
2070 	} else
2071 		error = dpm_suspend(state);
2072 	dpm_show_time(starttime, state, error, "start");
2073 	return error;
2074 }
2075 EXPORT_SYMBOL_GPL(dpm_suspend_start);
2076 
2077 void __suspend_report_result(const char *function, void *fn, int ret)
2078 {
2079 	if (ret)
2080 		pr_err("%s(): %pS returns %d\n", function, fn, ret);
2081 }
2082 EXPORT_SYMBOL_GPL(__suspend_report_result);
2083 
2084 /**
2085  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2086  * @subordinate: Device that needs to wait for @dev.
2087  * @dev: Device to wait for.
2088  */
2089 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2090 {
2091 	dpm_wait(dev, subordinate->power.async_suspend);
2092 	return async_error;
2093 }
2094 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
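/*
 * A hedged usage sketch for the helper above: a hypothetical driver whose
 * device must not suspend until another device has finished suspending
 * could wait for it from its own suspend callback. "foo", "priv->peer" and
 * foo_do_suspend() are illustrative names, not part of any real driver:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *priv = dev_get_drvdata(dev);
 *		int ret = device_pm_wait_for_dev(dev, priv->peer);
 *
 *		if (ret)
 *			return ret;
 *		return foo_do_suspend(priv);
 *	}
 */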
2095 
2096 /**
2097  * dpm_for_each_dev - device iterator.
2098  * @data: data for the callback.
2099  * @fn: function to be called for each device.
2100  *
2101  * Iterate over devices in dpm_list, and call @fn for each device,
2102  * passing it @data.
2103  */
2104 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2105 {
2106 	struct device *dev;
2107 
2108 	if (!fn)
2109 		return;
2110 
2111 	device_pm_lock();
2112 	list_for_each_entry(dev, &dpm_list, power.entry)
2113 		fn(dev, data);
2114 	device_pm_unlock();
2115 }
2116 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
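/*
 * A hypothetical example of the iterator above, counting the devices known
 * to the PM core ("count_cb" is an illustrative name only):
 *
 *	static void count_cb(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int n = 0;
 *	dpm_for_each_dev(&n, count_cb);
 */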
2117 
2118 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2119 {
2120 	if (!ops)
2121 		return true;
2122 
2123 	return !ops->prepare &&
2124 	       !ops->suspend &&
2125 	       !ops->suspend_late &&
2126 	       !ops->suspend_noirq &&
2127 	       !ops->resume_noirq &&
2128 	       !ops->resume_early &&
2129 	       !ops->resume &&
2130 	       !ops->complete;
2131 }
2132 
2133 void device_pm_check_callbacks(struct device *dev)
2134 {
2135 	unsigned long flags;
2136 
2137 	spin_lock_irqsave(&dev->power.lock, flags);
2138 	dev->power.no_pm_callbacks =
2139 		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2140 		 !dev->bus->suspend && !dev->bus->resume)) &&
2141 		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2142 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2143 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2144 		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2145 		 !dev->driver->suspend && !dev->driver->resume));
2146 	spin_unlock_irqrestore(&dev->power.lock, flags);
2147 }
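/*
 * The no_pm_callbacks flag computed above lets the PM core short-circuit
 * devices without any system sleep callbacks; device_prepare(), for
 * instance, skips the callback lookup entirely when it is set.
 */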
2148 
2149 bool dev_pm_smart_suspend_and_suspended(struct device *dev)
2150 {
2151 	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2152 		pm_runtime_status_suspended(dev);
2153 }
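/*
 * Bus types and other middle layers typically use the helper above to
 * decide whether a device that set DPM_FLAG_SMART_SUSPEND and is already
 * runtime-suspended can have its system-wide suspend callbacks skipped.
 */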
2154