1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
11  * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
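/*
 * Illustrative example (hypothetical driver "foo", not part of this file):
 * drivers typically supply the system sleep callbacks invoked below through
 * a struct dev_pm_ops, e.g.:
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *			.pm	= &foo_pm_ops,
 *		},
 *	};
 */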
19 
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/timer.h>
36 #include <linux/wakeup_reason.h>
37 
38 #include "../base.h"
39 #include "power.h"
40 
41 typedef int (*pm_callback_t)(struct device *);
42 
43 /*
44  * The entries in the dpm_list list are in a depth first order, simply
45  * because children are guaranteed to be discovered after parents, and
46  * are inserted at the back of the list on discovery.
47  *
48  * Since device_pm_add() may be called with a device lock held,
49  * we must never try to acquire a device lock while holding
50  * dpm_list_mutex.
51  */
52 
53 LIST_HEAD(dpm_list);
54 static LIST_HEAD(dpm_prepared_list);
55 static LIST_HEAD(dpm_suspended_list);
56 static LIST_HEAD(dpm_late_early_list);
57 static LIST_HEAD(dpm_noirq_list);
58 
59 struct suspend_stats suspend_stats;
60 static DEFINE_MUTEX(dpm_list_mtx);
61 static pm_message_t pm_transition;
62 
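/*
 * First error reported while suspending or resuming devices; a non-zero
 * value makes the current phase bail out early.
 */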
63 static int async_error;
64 
65 static char *pm_verb(int event)
66 {
67 	switch (event) {
68 	case PM_EVENT_SUSPEND:
69 		return "suspend";
70 	case PM_EVENT_RESUME:
71 		return "resume";
72 	case PM_EVENT_FREEZE:
73 		return "freeze";
74 	case PM_EVENT_QUIESCE:
75 		return "quiesce";
76 	case PM_EVENT_HIBERNATE:
77 		return "hibernate";
78 	case PM_EVENT_THAW:
79 		return "thaw";
80 	case PM_EVENT_RESTORE:
81 		return "restore";
82 	case PM_EVENT_RECOVER:
83 		return "recover";
84 	default:
85 		return "(unknown PM event)";
86 	}
87 }
88 
89 /**
90  * device_pm_sleep_init - Initialize system suspend-related device fields.
91  * @dev: Device object being initialized.
92  */
93 void device_pm_sleep_init(struct device *dev)
94 {
95 	dev->power.is_prepared = false;
96 	dev->power.is_suspended = false;
97 	dev->power.is_noirq_suspended = false;
98 	dev->power.is_late_suspended = false;
99 	init_completion(&dev->power.completion);
100 	complete_all(&dev->power.completion);
101 	dev->power.wakeup = NULL;
102 	INIT_LIST_HEAD(&dev->power.entry);
103 }
104 
105 /**
106  * device_pm_lock - Lock the list of active devices used by the PM core.
107  */
108 void device_pm_lock(void)
109 {
110 	mutex_lock(&dpm_list_mtx);
111 }
112 
113 /**
114  * device_pm_unlock - Unlock the list of active devices used by the PM core.
115  */
116 void device_pm_unlock(void)
117 {
118 	mutex_unlock(&dpm_list_mtx);
119 }
120 
121 /**
122  * device_pm_add - Add a device to the PM core's list of active devices.
123  * @dev: Device to add to the list.
124  */
125 void device_pm_add(struct device *dev)
126 {
127 	pr_debug("PM: Adding info for %s:%s\n",
128 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
129 	device_pm_check_callbacks(dev);
130 	mutex_lock(&dpm_list_mtx);
131 	if (dev->parent && dev->parent->power.is_prepared)
132 		dev_warn(dev, "parent %s should not be sleeping\n",
133 			dev_name(dev->parent));
134 	list_add_tail(&dev->power.entry, &dpm_list);
135 	mutex_unlock(&dpm_list_mtx);
136 }
137 
138 /**
139  * device_pm_remove - Remove a device from the PM core's list of active devices.
140  * @dev: Device to be removed from the list.
141  */
142 void device_pm_remove(struct device *dev)
143 {
144 	pr_debug("PM: Removing info for %s:%s\n",
145 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
146 	complete_all(&dev->power.completion);
147 	mutex_lock(&dpm_list_mtx);
148 	list_del_init(&dev->power.entry);
149 	mutex_unlock(&dpm_list_mtx);
150 	device_wakeup_disable(dev);
151 	pm_runtime_remove(dev);
152 	device_pm_check_callbacks(dev);
153 }
154 
155 /**
156  * device_pm_move_before - Move device in the PM core's list of active devices.
157  * @deva: Device to move in dpm_list.
158  * @devb: Device @deva should come before.
159  */
160 void device_pm_move_before(struct device *deva, struct device *devb)
161 {
162 	pr_debug("PM: Moving %s:%s before %s:%s\n",
163 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
164 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
165 	/* Delete deva from dpm_list and reinsert before devb. */
166 	list_move_tail(&deva->power.entry, &devb->power.entry);
167 }
168 
169 /**
170  * device_pm_move_after - Move device in the PM core's list of active devices.
171  * @deva: Device to move in dpm_list.
172  * @devb: Device @deva should come after.
173  */
174 void device_pm_move_after(struct device *deva, struct device *devb)
175 {
176 	pr_debug("PM: Moving %s:%s after %s:%s\n",
177 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
178 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
179 	/* Delete deva from dpm_list and reinsert after devb. */
180 	list_move(&deva->power.entry, &devb->power.entry);
181 }
182 
183 /**
184  * device_pm_move_last - Move device to end of the PM core's list of devices.
185  * @dev: Device to move in dpm_list.
186  */
187 void device_pm_move_last(struct device *dev)
188 {
189 	pr_debug("PM: Moving %s:%s to end of list\n",
190 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
191 	list_move_tail(&dev->power.entry, &dpm_list);
192 }
193 
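/*
 * When initcall debugging (pm_print_times) is enabled, the two helpers below
 * bracket each PM callback with "calling"/"returned" messages and report how
 * long the callback took.
 */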
194 static ktime_t initcall_debug_start(struct device *dev)
195 {
196 	ktime_t calltime = ktime_set(0, 0);
197 
198 	if (pm_print_times_enabled) {
199 		pr_info("calling  %s+ @ %i, parent: %s\n",
200 			dev_name(dev), task_pid_nr(current),
201 			dev->parent ? dev_name(dev->parent) : "none");
202 		calltime = ktime_get();
203 	}
204 
205 	return calltime;
206 }
207 
208 static void initcall_debug_report(struct device *dev, ktime_t calltime,
209 				  int error, pm_message_t state, char *info)
210 {
211 	ktime_t rettime;
212 	s64 nsecs;
213 
214 	rettime = ktime_get();
215 	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
216 
217 	if (pm_print_times_enabled) {
218 		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
219 			error, (unsigned long long)nsecs >> 10);
220 	}
221 }
222 
223 /**
224  * dpm_wait - Wait for a PM operation to complete.
225  * @dev: Device to wait for.
226  * @async: If unset, wait only if the device's power.async_suspend flag is set.
227  */
228 static void dpm_wait(struct device *dev, bool async)
229 {
230 	if (!dev)
231 		return;
232 
233 	if (async || (pm_async_enabled && dev->power.async_suspend))
234 		wait_for_completion(&dev->power.completion);
235 }
236 
237 static int dpm_wait_fn(struct device *dev, void *async_ptr)
238 {
239 	dpm_wait(dev, *((bool *)async_ptr));
240 	return 0;
241 }
242 
243 static void dpm_wait_for_children(struct device *dev, bool async)
244 {
245        device_for_each_child(dev, &async, dpm_wait_fn);
246 }
247 
248 /**
249  * pm_op - Return the PM operation appropriate for given PM event.
250  * @ops: PM operations to choose from.
251  * @state: PM transition of the system being carried out.
252  */
253 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
254 {
255 	switch (state.event) {
256 #ifdef CONFIG_SUSPEND
257 	case PM_EVENT_SUSPEND:
258 		return ops->suspend;
259 	case PM_EVENT_RESUME:
260 		return ops->resume;
261 #endif /* CONFIG_SUSPEND */
262 #ifdef CONFIG_HIBERNATE_CALLBACKS
263 	case PM_EVENT_FREEZE:
264 	case PM_EVENT_QUIESCE:
265 		return ops->freeze;
266 	case PM_EVENT_HIBERNATE:
267 		return ops->poweroff;
268 	case PM_EVENT_THAW:
269 	case PM_EVENT_RECOVER:
270 		return ops->thaw;
271 		break;
272 	case PM_EVENT_RESTORE:
273 		return ops->restore;
274 #endif /* CONFIG_HIBERNATE_CALLBACKS */
275 	}
276 
277 	return NULL;
278 }
279 
280 /**
281  * pm_late_early_op - Return the PM operation appropriate for given PM event.
282  * @ops: PM operations to choose from.
283  * @state: PM transition of the system being carried out.
284  *
285  * Runtime PM is disabled for @dev while this function is being executed.
286  */
287 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
288 				      pm_message_t state)
289 {
290 	switch (state.event) {
291 #ifdef CONFIG_SUSPEND
292 	case PM_EVENT_SUSPEND:
293 		return ops->suspend_late;
294 	case PM_EVENT_RESUME:
295 		return ops->resume_early;
296 #endif /* CONFIG_SUSPEND */
297 #ifdef CONFIG_HIBERNATE_CALLBACKS
298 	case PM_EVENT_FREEZE:
299 	case PM_EVENT_QUIESCE:
300 		return ops->freeze_late;
301 	case PM_EVENT_HIBERNATE:
302 		return ops->poweroff_late;
303 	case PM_EVENT_THAW:
304 	case PM_EVENT_RECOVER:
305 		return ops->thaw_early;
306 	case PM_EVENT_RESTORE:
307 		return ops->restore_early;
308 #endif /* CONFIG_HIBERNATE_CALLBACKS */
309 	}
310 
311 	return NULL;
312 }
313 
314 /**
315  * pm_noirq_op - Return the PM operation appropriate for given PM event.
316  * @ops: PM operations to choose from.
317  * @state: PM transition of the system being carried out.
318  *
319  * The driver of @dev will not receive interrupts while this function is being
320  * executed.
321  */
322 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
323 {
324 	switch (state.event) {
325 #ifdef CONFIG_SUSPEND
326 	case PM_EVENT_SUSPEND:
327 		return ops->suspend_noirq;
328 	case PM_EVENT_RESUME:
329 		return ops->resume_noirq;
330 #endif /* CONFIG_SUSPEND */
331 #ifdef CONFIG_HIBERNATE_CALLBACKS
332 	case PM_EVENT_FREEZE:
333 	case PM_EVENT_QUIESCE:
334 		return ops->freeze_noirq;
335 	case PM_EVENT_HIBERNATE:
336 		return ops->poweroff_noirq;
337 	case PM_EVENT_THAW:
338 	case PM_EVENT_RECOVER:
339 		return ops->thaw_noirq;
340 	case PM_EVENT_RESTORE:
341 		return ops->restore_noirq;
342 #endif /* CONFIG_HIBERNATE_CALLBACKS */
343 	}
344 
345 	return NULL;
346 }
347 
348 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
349 {
350 	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
351 		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
352 		", may wakeup" : "");
353 }
354 
355 static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
356 			int error)
357 {
358 	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
359 		dev_name(dev), pm_verb(state.event), info, error);
360 }
361 
362 static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
363 {
364 	ktime_t calltime;
365 	u64 usecs64;
366 	int usecs;
367 
368 	calltime = ktime_get();
369 	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
370 	do_div(usecs64, NSEC_PER_USEC);
371 	usecs = usecs64;
372 	if (usecs == 0)
373 		usecs = 1;
374 	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
375 		info ?: "", info ? " " : "", pm_verb(state.event),
376 		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
377 }
378 
379 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
380 			    pm_message_t state, char *info)
381 {
382 	ktime_t calltime;
383 	int error;
384 
385 	if (!cb)
386 		return 0;
387 
388 	calltime = initcall_debug_start(dev);
389 
390 	pm_dev_dbg(dev, state, info);
391 	trace_device_pm_callback_start(dev, info, state.event);
392 	error = cb(dev);
393 	trace_device_pm_callback_end(dev, error);
394 	suspend_report_result(cb, error);
395 
396 	initcall_debug_report(dev, calltime, error, state, info);
397 
398 	return error;
399 }
400 
401 #ifdef CONFIG_DPM_WATCHDOG
402 struct dpm_watchdog {
403 	struct device		*dev;
404 	struct task_struct	*tsk;
405 	struct timer_list	timer;
406 };
407 
408 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
409 	struct dpm_watchdog wd
410 
411 /**
412  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
413  * @data: Watchdog object address.
414  *
415  * Called when a driver has timed out suspending or resuming.
416  * There's not much we can do here to recover so panic() to
417  * capture a crash-dump in pstore.
418  */
419 static void dpm_watchdog_handler(unsigned long data)
420 {
421 	struct dpm_watchdog *wd = (void *)data;
422 
423 	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
424 	show_stack(wd->tsk, NULL);
425 	panic("%s %s: unrecoverable failure\n",
426 		dev_driver_string(wd->dev), dev_name(wd->dev));
427 }
428 
429 /**
430  * dpm_watchdog_set - Enable pm watchdog for given device.
431  * @wd: Watchdog. Must be allocated on the stack.
432  * @dev: Device to handle.
433  */
434 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
435 {
436 	struct timer_list *timer = &wd->timer;
437 
438 	wd->dev = dev;
439 	wd->tsk = current;
440 
441 	init_timer_on_stack(timer);
442 	/* use same timeout value for both suspend and resume */
443 	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
444 	timer->function = dpm_watchdog_handler;
445 	timer->data = (unsigned long)wd;
446 	add_timer(timer);
447 }
448 
449 /**
450  * dpm_watchdog_clear - Disable suspend/resume watchdog.
451  * @wd: Watchdog to disable.
452  */
453 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
454 {
455 	struct timer_list *timer = &wd->timer;
456 
457 	del_timer_sync(timer);
458 	destroy_timer_on_stack(timer);
459 }
460 #else
461 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
462 #define dpm_watchdog_set(x, y)
463 #define dpm_watchdog_clear(x)
464 #endif
465 
466 /*------------------------- Resume routines -------------------------*/
467 
468 /**
469  * device_resume_noirq - Execute a "noirq resume" callback for given device.
470  * @dev: Device to handle.
471  * @state: PM transition of the system being carried out.
472  * @async: If true, the device is being resumed asynchronously.
473  *
474  * The driver of @dev will not receive interrupts while this function is being
475  * executed.
476  */
477 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
478 {
479 	pm_callback_t callback = NULL;
480 	char *info = NULL;
481 	int error = 0;
482 
483 	TRACE_DEVICE(dev);
484 	TRACE_RESUME(0);
485 
486 	if (dev->power.syscore || dev->power.direct_complete)
487 		goto Out;
488 
489 	if (!dev->power.is_noirq_suspended)
490 		goto Out;
491 
492 	dpm_wait(dev->parent, async);
493 
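	/*
	 * Callback lookup order: PM domain, then device type, class and bus;
	 * fall back to the driver's own ops only if none of those provides
	 * a callback.
	 */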
494 	if (dev->pm_domain) {
495 		info = "noirq power domain ";
496 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
497 	} else if (dev->type && dev->type->pm) {
498 		info = "noirq type ";
499 		callback = pm_noirq_op(dev->type->pm, state);
500 	} else if (dev->class && dev->class->pm) {
501 		info = "noirq class ";
502 		callback = pm_noirq_op(dev->class->pm, state);
503 	} else if (dev->bus && dev->bus->pm) {
504 		info = "noirq bus ";
505 		callback = pm_noirq_op(dev->bus->pm, state);
506 	}
507 
508 	if (!callback && dev->driver && dev->driver->pm) {
509 		info = "noirq driver ";
510 		callback = pm_noirq_op(dev->driver->pm, state);
511 	}
512 
513 	error = dpm_run_callback(callback, dev, state, info);
514 	dev->power.is_noirq_suspended = false;
515 
516  Out:
517 	complete_all(&dev->power.completion);
518 	TRACE_RESUME(error);
519 	return error;
520 }
521 
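/*
 * A device is handled asynchronously only if async PM is enabled globally,
 * the device has requested it (power.async_suspend), and PM tracing is not
 * active.
 */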
522 static bool is_async(struct device *dev)
523 {
524 	return dev->power.async_suspend && pm_async_enabled
525 		&& !pm_trace_is_enabled();
526 }
527 
528 static void async_resume_noirq(void *data, async_cookie_t cookie)
529 {
530 	struct device *dev = (struct device *)data;
531 	int error;
532 
533 	error = device_resume_noirq(dev, pm_transition, true);
534 	if (error)
535 		pm_dev_err(dev, pm_transition, " async", error);
536 
537 	put_device(dev);
538 }
539 
540 /**
541  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
542  * @state: PM transition of the system being carried out.
543  *
544  * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
545  * enable device drivers to receive interrupts.
546  */
547 void dpm_resume_noirq(pm_message_t state)
548 {
549 	struct device *dev;
550 	ktime_t starttime = ktime_get();
551 
552 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
553 	mutex_lock(&dpm_list_mtx);
554 	pm_transition = state;
555 
556 	/*
557 	 * Advance the async threads upfront,
558 	 * in case the starting of async threads is
559 	 * delayed by non-async resuming devices.
560 	 */
561 	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
562 		reinit_completion(&dev->power.completion);
563 		if (is_async(dev)) {
564 			get_device(dev);
565 			async_schedule(async_resume_noirq, dev);
566 		}
567 	}
568 
569 	while (!list_empty(&dpm_noirq_list)) {
570 		dev = to_device(dpm_noirq_list.next);
571 		get_device(dev);
572 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
573 		mutex_unlock(&dpm_list_mtx);
574 
575 		if (!is_async(dev)) {
576 			int error;
577 
578 			error = device_resume_noirq(dev, state, false);
579 			if (error) {
580 				suspend_stats.failed_resume_noirq++;
581 				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
582 				dpm_save_failed_dev(dev_name(dev));
583 				pm_dev_err(dev, state, " noirq", error);
584 			}
585 		}
586 
587 		mutex_lock(&dpm_list_mtx);
588 		put_device(dev);
589 	}
590 	mutex_unlock(&dpm_list_mtx);
591 	async_synchronize_full();
592 	dpm_show_time(starttime, state, "noirq");
593 	resume_device_irqs();
594 	device_wakeup_disarm_wake_irqs();
595 	cpuidle_resume();
596 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
597 }
598 
599 /**
600  * device_resume_early - Execute an "early resume" callback for given device.
601  * @dev: Device to handle.
602  * @state: PM transition of the system being carried out.
603  * @async: If true, the device is being resumed asynchronously.
604  *
605  * Runtime PM is disabled for @dev while this function is being executed.
606  */
607 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
608 {
609 	pm_callback_t callback = NULL;
610 	char *info = NULL;
611 	int error = 0;
612 
613 	TRACE_DEVICE(dev);
614 	TRACE_RESUME(0);
615 
616 	if (dev->power.syscore || dev->power.direct_complete)
617 		goto Out;
618 
619 	if (!dev->power.is_late_suspended)
620 		goto Out;
621 
622 	dpm_wait(dev->parent, async);
623 
624 	if (dev->pm_domain) {
625 		info = "early power domain ";
626 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
627 	} else if (dev->type && dev->type->pm) {
628 		info = "early type ";
629 		callback = pm_late_early_op(dev->type->pm, state);
630 	} else if (dev->class && dev->class->pm) {
631 		info = "early class ";
632 		callback = pm_late_early_op(dev->class->pm, state);
633 	} else if (dev->bus && dev->bus->pm) {
634 		info = "early bus ";
635 		callback = pm_late_early_op(dev->bus->pm, state);
636 	}
637 
638 	if (!callback && dev->driver && dev->driver->pm) {
639 		info = "early driver ";
640 		callback = pm_late_early_op(dev->driver->pm, state);
641 	}
642 
643 	error = dpm_run_callback(callback, dev, state, info);
644 	dev->power.is_late_suspended = false;
645 
646  Out:
647 	TRACE_RESUME(error);
648 
649 	pm_runtime_enable(dev);
650 	complete_all(&dev->power.completion);
651 	return error;
652 }
653 
654 static void async_resume_early(void *data, async_cookie_t cookie)
655 {
656 	struct device *dev = (struct device *)data;
657 	int error;
658 
659 	error = device_resume_early(dev, pm_transition, true);
660 	if (error)
661 		pm_dev_err(dev, pm_transition, " async", error);
662 
663 	put_device(dev);
664 }
665 
666 /**
667  * dpm_resume_early - Execute "early resume" callbacks for all devices.
668  * @state: PM transition of the system being carried out.
669  */
670 void dpm_resume_early(pm_message_t state)
671 {
672 	struct device *dev;
673 	ktime_t starttime = ktime_get();
674 
675 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
676 	mutex_lock(&dpm_list_mtx);
677 	pm_transition = state;
678 
679 	/*
680 	 * Advance the async threads upfront,
681 	 * in case the starting of async threads is
682 	 * delayed by non-async resuming devices.
683 	 */
684 	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
685 		reinit_completion(&dev->power.completion);
686 		if (is_async(dev)) {
687 			get_device(dev);
688 			async_schedule(async_resume_early, dev);
689 		}
690 	}
691 
692 	while (!list_empty(&dpm_late_early_list)) {
693 		dev = to_device(dpm_late_early_list.next);
694 		get_device(dev);
695 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
696 		mutex_unlock(&dpm_list_mtx);
697 
698 		if (!is_async(dev)) {
699 			int error;
700 
701 			error = device_resume_early(dev, state, false);
702 			if (error) {
703 				suspend_stats.failed_resume_early++;
704 				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
705 				dpm_save_failed_dev(dev_name(dev));
706 				pm_dev_err(dev, state, " early", error);
707 			}
708 		}
709 		mutex_lock(&dpm_list_mtx);
710 		put_device(dev);
711 	}
712 	mutex_unlock(&dpm_list_mtx);
713 	async_synchronize_full();
714 	dpm_show_time(starttime, state, "early");
715 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
716 }
717 
718 /**
719  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
720  * @state: PM transition of the system being carried out.
721  */
722 void dpm_resume_start(pm_message_t state)
723 {
724 	dpm_resume_noirq(state);
725 	dpm_resume_early(state);
726 }
727 EXPORT_SYMBOL_GPL(dpm_resume_start);
728 
729 /**
730  * device_resume - Execute "resume" callbacks for given device.
731  * @dev: Device to handle.
732  * @state: PM transition of the system being carried out.
733  * @async: If true, the device is being resumed asynchronously.
734  */
735 static int device_resume(struct device *dev, pm_message_t state, bool async)
736 {
737 	pm_callback_t callback = NULL;
738 	char *info = NULL;
739 	int error = 0;
740 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
741 
742 	TRACE_DEVICE(dev);
743 	TRACE_RESUME(0);
744 
745 	if (dev->power.syscore)
746 		goto Complete;
747 
748 	if (dev->power.direct_complete) {
749 		/* Match the pm_runtime_disable() in __device_suspend(). */
750 		pm_runtime_enable(dev);
751 		goto Complete;
752 	}
753 
754 	dpm_wait(dev->parent, async);
755 	dpm_watchdog_set(&wd, dev);
756 	device_lock(dev);
757 
758 	/*
759 	 * This is a fib.  But we'll allow new children to be added below
760 	 * a resumed device, even if the device hasn't been completed yet.
761 	 */
762 	dev->power.is_prepared = false;
763 
764 	if (!dev->power.is_suspended)
765 		goto Unlock;
766 
767 	if (dev->pm_domain) {
768 		info = "power domain ";
769 		callback = pm_op(&dev->pm_domain->ops, state);
770 		goto Driver;
771 	}
772 
773 	if (dev->type && dev->type->pm) {
774 		info = "type ";
775 		callback = pm_op(dev->type->pm, state);
776 		goto Driver;
777 	}
778 
779 	if (dev->class) {
780 		if (dev->class->pm) {
781 			info = "class ";
782 			callback = pm_op(dev->class->pm, state);
783 			goto Driver;
784 		} else if (dev->class->resume) {
785 			info = "legacy class ";
786 			callback = dev->class->resume;
787 			goto End;
788 		}
789 	}
790 
791 	if (dev->bus) {
792 		if (dev->bus->pm) {
793 			info = "bus ";
794 			callback = pm_op(dev->bus->pm, state);
795 		} else if (dev->bus->resume) {
796 			info = "legacy bus ";
797 			callback = dev->bus->resume;
798 			goto End;
799 		}
800 	}
801 
802  Driver:
803 	if (!callback && dev->driver && dev->driver->pm) {
804 		info = "driver ";
805 		callback = pm_op(dev->driver->pm, state);
806 	}
807 
808  End:
809 	error = dpm_run_callback(callback, dev, state, info);
810 	dev->power.is_suspended = false;
811 
812  Unlock:
813 	device_unlock(dev);
814 	dpm_watchdog_clear(&wd);
815 
816  Complete:
817 	complete_all(&dev->power.completion);
818 
819 	TRACE_RESUME(error);
820 
821 	return error;
822 }
823 
824 static void async_resume(void *data, async_cookie_t cookie)
825 {
826 	struct device *dev = (struct device *)data;
827 	int error;
828 
829 	error = device_resume(dev, pm_transition, true);
830 	if (error)
831 		pm_dev_err(dev, pm_transition, " async", error);
832 	put_device(dev);
833 }
834 
835 /**
836  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
837  * @state: PM transition of the system being carried out.
838  *
839  * Execute the appropriate "resume" callback for all devices whose status
840  * indicates that they are suspended.
841  */
842 void dpm_resume(pm_message_t state)
843 {
844 	struct device *dev;
845 	ktime_t starttime = ktime_get();
846 
847 	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
848 	might_sleep();
849 
850 	mutex_lock(&dpm_list_mtx);
851 	pm_transition = state;
852 	async_error = 0;
853 
854 	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
855 		reinit_completion(&dev->power.completion);
856 		if (is_async(dev)) {
857 			get_device(dev);
858 			async_schedule(async_resume, dev);
859 		}
860 	}
861 
862 	while (!list_empty(&dpm_suspended_list)) {
863 		dev = to_device(dpm_suspended_list.next);
864 		get_device(dev);
865 		if (!is_async(dev)) {
866 			int error;
867 
868 			mutex_unlock(&dpm_list_mtx);
869 
870 			error = device_resume(dev, state, false);
871 			if (error) {
872 				suspend_stats.failed_resume++;
873 				dpm_save_failed_step(SUSPEND_RESUME);
874 				dpm_save_failed_dev(dev_name(dev));
875 				pm_dev_err(dev, state, "", error);
876 			}
877 
878 			mutex_lock(&dpm_list_mtx);
879 		}
880 		if (!list_empty(&dev->power.entry))
881 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
882 		put_device(dev);
883 	}
884 	mutex_unlock(&dpm_list_mtx);
885 	async_synchronize_full();
886 	dpm_show_time(starttime, state, NULL);
887 
888 	cpufreq_resume();
889 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
890 }
891 
892 /**
893  * device_complete - Complete a PM transition for given device.
894  * @dev: Device to handle.
895  * @state: PM transition of the system being carried out.
896  */
897 static void device_complete(struct device *dev, pm_message_t state)
898 {
899 	void (*callback)(struct device *) = NULL;
900 	char *info = NULL;
901 
902 	if (dev->power.syscore)
903 		return;
904 
905 	device_lock(dev);
906 
907 	if (dev->pm_domain) {
908 		info = "completing power domain ";
909 		callback = dev->pm_domain->ops.complete;
910 	} else if (dev->type && dev->type->pm) {
911 		info = "completing type ";
912 		callback = dev->type->pm->complete;
913 	} else if (dev->class && dev->class->pm) {
914 		info = "completing class ";
915 		callback = dev->class->pm->complete;
916 	} else if (dev->bus && dev->bus->pm) {
917 		info = "completing bus ";
918 		callback = dev->bus->pm->complete;
919 	}
920 
921 	if (!callback && dev->driver && dev->driver->pm) {
922 		info = "completing driver ";
923 		callback = dev->driver->pm->complete;
924 	}
925 
926 	if (callback) {
927 		pm_dev_dbg(dev, state, info);
928 		callback(dev);
929 	}
930 
931 	device_unlock(dev);
932 
933 	pm_runtime_put(dev);
934 }
935 
936 /**
937  * dpm_complete - Complete a PM transition for all non-sysdev devices.
938  * @state: PM transition of the system being carried out.
939  *
940  * Execute the ->complete() callbacks for all devices whose PM status is not
941  * DPM_ON (this allows new devices to be registered).
942  */
943 void dpm_complete(pm_message_t state)
944 {
945 	struct list_head list;
946 
947 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
948 	might_sleep();
949 
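	/*
	 * Move devices onto a temporary list so that dpm_list_mtx can be
	 * dropped around each ->complete() call; splice them back into
	 * dpm_list once every device has been handled.
	 */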
950 	INIT_LIST_HEAD(&list);
951 	mutex_lock(&dpm_list_mtx);
952 	while (!list_empty(&dpm_prepared_list)) {
953 		struct device *dev = to_device(dpm_prepared_list.prev);
954 
955 		get_device(dev);
956 		dev->power.is_prepared = false;
957 		list_move(&dev->power.entry, &list);
958 		mutex_unlock(&dpm_list_mtx);
959 
960 		trace_device_pm_callback_start(dev, "", state.event);
961 		device_complete(dev, state);
962 		trace_device_pm_callback_end(dev, 0);
963 
964 		mutex_lock(&dpm_list_mtx);
965 		put_device(dev);
966 	}
967 	list_splice(&list, &dpm_list);
968 	mutex_unlock(&dpm_list_mtx);
969 
970 	/* Allow device probing and trigger re-probing of deferred devices */
971 	device_unblock_probing();
972 	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
973 }
974 
975 /**
976  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
977  * @state: PM transition of the system being carried out.
978  *
979  * Execute "resume" callbacks for all devices and complete the PM transition of
980  * the system.
981  */
982 void dpm_resume_end(pm_message_t state)
983 {
984 	dpm_resume(state);
985 	dpm_complete(state);
986 }
987 EXPORT_SYMBOL_GPL(dpm_resume_end);
988 
989 
990 /*------------------------- Suspend routines -------------------------*/
991 
992 /**
993  * resume_event - Return a "resume" message for given "suspend" sleep state.
994  * @sleep_state: PM message representing a sleep state.
995  *
996  * Return a PM message representing the resume event corresponding to given
997  * sleep state.
998  */
999 static pm_message_t resume_event(pm_message_t sleep_state)
1000 {
1001 	switch (sleep_state.event) {
1002 	case PM_EVENT_SUSPEND:
1003 		return PMSG_RESUME;
1004 	case PM_EVENT_FREEZE:
1005 	case PM_EVENT_QUIESCE:
1006 		return PMSG_RECOVER;
1007 	case PM_EVENT_HIBERNATE:
1008 		return PMSG_RESTORE;
1009 	}
1010 	return PMSG_ON;
1011 }
1012 
1013 /**
1014  * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1015  * @dev: Device to handle.
1016  * @state: PM transition of the system being carried out.
1017  * @async: If true, the device is being suspended asynchronously.
1018  *
1019  * The driver of @dev will not receive interrupts while this function is being
1020  * executed.
1021  */
1022 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1023 {
1024 	pm_callback_t callback = NULL;
1025 	char *info = NULL;
1026 	int error = 0;
1027 
1028 	TRACE_DEVICE(dev);
1029 	TRACE_SUSPEND(0);
1030 
1031 	dpm_wait_for_children(dev, async);
1032 
1033 	if (async_error)
1034 		goto Complete;
1035 
1036 	if (pm_wakeup_pending()) {
1037 		async_error = -EBUSY;
1038 		goto Complete;
1039 	}
1040 
1041 	if (dev->power.syscore || dev->power.direct_complete)
1042 		goto Complete;
1043 
1044 	if (dev->pm_domain) {
1045 		info = "noirq power domain ";
1046 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1047 	} else if (dev->type && dev->type->pm) {
1048 		info = "noirq type ";
1049 		callback = pm_noirq_op(dev->type->pm, state);
1050 	} else if (dev->class && dev->class->pm) {
1051 		info = "noirq class ";
1052 		callback = pm_noirq_op(dev->class->pm, state);
1053 	} else if (dev->bus && dev->bus->pm) {
1054 		info = "noirq bus ";
1055 		callback = pm_noirq_op(dev->bus->pm, state);
1056 	}
1057 
1058 	if (!callback && dev->driver && dev->driver->pm) {
1059 		info = "noirq driver ";
1060 		callback = pm_noirq_op(dev->driver->pm, state);
1061 	}
1062 
1063 	error = dpm_run_callback(callback, dev, state, info);
1064 	if (!error)
1065 		dev->power.is_noirq_suspended = true;
1066 	else
1067 		async_error = error;
1068 
1069 Complete:
1070 	complete_all(&dev->power.completion);
1071 	TRACE_SUSPEND(error);
1072 	return error;
1073 }
1074 
1075 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1076 {
1077 	struct device *dev = (struct device *)data;
1078 	int error;
1079 
1080 	error = __device_suspend_noirq(dev, pm_transition, true);
1081 	if (error) {
1082 		dpm_save_failed_dev(dev_name(dev));
1083 		pm_dev_err(dev, pm_transition, " async", error);
1084 	}
1085 
1086 	put_device(dev);
1087 }
1088 
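/*
 * Queue the device for asynchronous "noirq" suspend if possible, otherwise
 * suspend it synchronously.  device_suspend_late() and device_suspend()
 * below follow the same pattern for their respective phases.
 */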
1089 static int device_suspend_noirq(struct device *dev)
1090 {
1091 	reinit_completion(&dev->power.completion);
1092 
1093 	if (is_async(dev)) {
1094 		get_device(dev);
1095 		async_schedule(async_suspend_noirq, dev);
1096 		return 0;
1097 	}
1098 	return __device_suspend_noirq(dev, pm_transition, false);
1099 }
1100 
1101 /**
1102  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1103  * @state: PM transition of the system being carried out.
1104  *
1105  * Prevent device drivers from receiving interrupts and call the "noirq" suspend
1106  * handlers for all non-sysdev devices.
1107  */
1108 int dpm_suspend_noirq(pm_message_t state)
1109 {
1110 	ktime_t starttime = ktime_get();
1111 	int error = 0;
1112 
1113 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1114 	cpuidle_pause();
1115 	device_wakeup_arm_wake_irqs();
1116 	suspend_device_irqs();
1117 	mutex_lock(&dpm_list_mtx);
1118 	pm_transition = state;
1119 	async_error = 0;
1120 
1121 	while (!list_empty(&dpm_late_early_list)) {
1122 		struct device *dev = to_device(dpm_late_early_list.prev);
1123 
1124 		get_device(dev);
1125 		mutex_unlock(&dpm_list_mtx);
1126 
1127 		error = device_suspend_noirq(dev);
1128 
1129 		mutex_lock(&dpm_list_mtx);
1130 		if (error) {
1131 			pm_dev_err(dev, state, " noirq", error);
1132 			dpm_save_failed_dev(dev_name(dev));
1133 			put_device(dev);
1134 			break;
1135 		}
1136 		if (!list_empty(&dev->power.entry))
1137 			list_move(&dev->power.entry, &dpm_noirq_list);
1138 		put_device(dev);
1139 
1140 		if (async_error)
1141 			break;
1142 	}
1143 	mutex_unlock(&dpm_list_mtx);
1144 	async_synchronize_full();
1145 	if (!error)
1146 		error = async_error;
1147 
1148 	if (error) {
1149 		suspend_stats.failed_suspend_noirq++;
1150 		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1151 		dpm_resume_noirq(resume_event(state));
1152 	} else {
1153 		dpm_show_time(starttime, state, "noirq");
1154 	}
1155 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1156 	return error;
1157 }
1158 
1159 /**
1160  * device_suspend_late - Execute a "late suspend" callback for given device.
1161  * @dev: Device to handle.
1162  * @state: PM transition of the system being carried out.
1163  * @async: If true, the device is being suspended asynchronously.
1164  *
1165  * Runtime PM is disabled for @dev while this function is being executed.
1166  */
1167 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1168 {
1169 	pm_callback_t callback = NULL;
1170 	char *info = NULL;
1171 	int error = 0;
1172 
1173 	TRACE_DEVICE(dev);
1174 	TRACE_SUSPEND(0);
1175 
1176 	__pm_runtime_disable(dev, false);
1177 
1178 	dpm_wait_for_children(dev, async);
1179 
1180 	if (async_error)
1181 		goto Complete;
1182 
1183 	if (pm_wakeup_pending()) {
1184 		async_error = -EBUSY;
1185 		goto Complete;
1186 	}
1187 
1188 	if (dev->power.syscore || dev->power.direct_complete)
1189 		goto Complete;
1190 
1191 	if (dev->pm_domain) {
1192 		info = "late power domain ";
1193 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1194 	} else if (dev->type && dev->type->pm) {
1195 		info = "late type ";
1196 		callback = pm_late_early_op(dev->type->pm, state);
1197 	} else if (dev->class && dev->class->pm) {
1198 		info = "late class ";
1199 		callback = pm_late_early_op(dev->class->pm, state);
1200 	} else if (dev->bus && dev->bus->pm) {
1201 		info = "late bus ";
1202 		callback = pm_late_early_op(dev->bus->pm, state);
1203 	}
1204 
1205 	if (!callback && dev->driver && dev->driver->pm) {
1206 		info = "late driver ";
1207 		callback = pm_late_early_op(dev->driver->pm, state);
1208 	}
1209 
1210 	error = dpm_run_callback(callback, dev, state, info);
1211 	if (!error)
1212 		dev->power.is_late_suspended = true;
1213 	else
1214 		async_error = error;
1215 
1216 Complete:
1217 	TRACE_SUSPEND(error);
1218 	complete_all(&dev->power.completion);
1219 	return error;
1220 }
1221 
1222 static void async_suspend_late(void *data, async_cookie_t cookie)
1223 {
1224 	struct device *dev = (struct device *)data;
1225 	int error;
1226 
1227 	error = __device_suspend_late(dev, pm_transition, true);
1228 	if (error) {
1229 		dpm_save_failed_dev(dev_name(dev));
1230 		pm_dev_err(dev, pm_transition, " async", error);
1231 	}
1232 	put_device(dev);
1233 }
1234 
1235 static int device_suspend_late(struct device *dev)
1236 {
1237 	reinit_completion(&dev->power.completion);
1238 
1239 	if (is_async(dev)) {
1240 		get_device(dev);
1241 		async_schedule(async_suspend_late, dev);
1242 		return 0;
1243 	}
1244 
1245 	return __device_suspend_late(dev, pm_transition, false);
1246 }
1247 
1248 /**
1249  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1250  * @state: PM transition of the system being carried out.
1251  */
1252 int dpm_suspend_late(pm_message_t state)
1253 {
1254 	ktime_t starttime = ktime_get();
1255 	int error = 0;
1256 
1257 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1258 	mutex_lock(&dpm_list_mtx);
1259 	pm_transition = state;
1260 	async_error = 0;
1261 
1262 	while (!list_empty(&dpm_suspended_list)) {
1263 		struct device *dev = to_device(dpm_suspended_list.prev);
1264 
1265 		get_device(dev);
1266 		mutex_unlock(&dpm_list_mtx);
1267 
1268 		error = device_suspend_late(dev);
1269 
1270 		mutex_lock(&dpm_list_mtx);
1271 		if (!list_empty(&dev->power.entry))
1272 			list_move(&dev->power.entry, &dpm_late_early_list);
1273 
1274 		if (error) {
1275 			pm_dev_err(dev, state, " late", error);
1276 			dpm_save_failed_dev(dev_name(dev));
1277 			put_device(dev);
1278 			break;
1279 		}
1280 		put_device(dev);
1281 
1282 		if (async_error)
1283 			break;
1284 	}
1285 	mutex_unlock(&dpm_list_mtx);
1286 	async_synchronize_full();
1287 	if (!error)
1288 		error = async_error;
1289 	if (error) {
1290 		suspend_stats.failed_suspend_late++;
1291 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1292 		dpm_resume_early(resume_event(state));
1293 	} else {
1294 		dpm_show_time(starttime, state, "late");
1295 	}
1296 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1297 	return error;
1298 }
1299 
1300 /**
1301  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1302  * @state: PM transition of the system being carried out.
1303  */
1304 int dpm_suspend_end(pm_message_t state)
1305 {
1306 	int error = dpm_suspend_late(state);
1307 	if (error)
1308 		return error;
1309 
1310 	error = dpm_suspend_noirq(state);
1311 	if (error) {
1312 		dpm_resume_early(resume_event(state));
1313 		return error;
1314 	}
1315 
1316 	return 0;
1317 }
1318 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1319 
1320 /**
1321  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1322  * @dev: Device to suspend.
1323  * @state: PM transition of the system being carried out.
1324  * @cb: Suspend callback to execute.
1325  * @info: string description of caller.
1326  */
1327 static int legacy_suspend(struct device *dev, pm_message_t state,
1328 			  int (*cb)(struct device *dev, pm_message_t state),
1329 			  char *info)
1330 {
1331 	int error;
1332 	ktime_t calltime;
1333 
1334 	calltime = initcall_debug_start(dev);
1335 
1336 	trace_device_pm_callback_start(dev, info, state.event);
1337 	error = cb(dev, state);
1338 	trace_device_pm_callback_end(dev, error);
1339 	suspend_report_result(cb, error);
1340 
1341 	initcall_debug_report(dev, calltime, error, state, info);
1342 
1343 	return error;
1344 }
1345 
1346 /**
1347  * device_suspend - Execute "suspend" callbacks for given device.
1348  * @dev: Device to handle.
1349  * @state: PM transition of the system being carried out.
1350  * @async: If true, the device is being suspended asynchronously.
1351  */
1352 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1353 {
1354 	pm_callback_t callback = NULL;
1355 	char *info = NULL;
1356 	int error = 0;
1357 	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
1358 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1359 
1360 	TRACE_DEVICE(dev);
1361 	TRACE_SUSPEND(0);
1362 
1363 	dpm_wait_for_children(dev, async);
1364 
1365 	if (async_error)
1366 		goto Complete;
1367 
1368 	/*
1369 	 * If a device configured to wake up the system from sleep states
1370 	 * has been suspended at run time and there's a resume request pending
1371 	 * for it, this is equivalent to the device signaling wakeup, so the
1372 	 * system suspend operation should be aborted.
1373 	 */
1374 	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1375 		pm_wakeup_event(dev, 0);
1376 
1377 	if (pm_wakeup_pending()) {
1378 		pm_get_active_wakeup_sources(suspend_abort,
1379 			MAX_SUSPEND_ABORT_LEN);
1380 		log_suspend_abort_reason(suspend_abort);
1381 		async_error = -EBUSY;
1382 		goto Complete;
1383 	}
1384 
1385 	if (dev->power.syscore)
1386 		goto Complete;
1387 
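	/*
	 * For direct_complete, skip all callbacks if the device is still
	 * runtime-suspended once runtime PM has been disabled; checking the
	 * status again after pm_runtime_disable() guards against a runtime
	 * resume racing with the first check.
	 */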
1388 	if (dev->power.direct_complete) {
1389 		if (pm_runtime_status_suspended(dev)) {
1390 			pm_runtime_disable(dev);
1391 			if (pm_runtime_status_suspended(dev))
1392 				goto Complete;
1393 
1394 			pm_runtime_enable(dev);
1395 		}
1396 		dev->power.direct_complete = false;
1397 	}
1398 
1399 	dpm_watchdog_set(&wd, dev);
1400 	device_lock(dev);
1401 
1402 	if (dev->pm_domain) {
1403 		info = "power domain ";
1404 		callback = pm_op(&dev->pm_domain->ops, state);
1405 		goto Run;
1406 	}
1407 
1408 	if (dev->type && dev->type->pm) {
1409 		info = "type ";
1410 		callback = pm_op(dev->type->pm, state);
1411 		goto Run;
1412 	}
1413 
1414 	if (dev->class) {
1415 		if (dev->class->pm) {
1416 			info = "class ";
1417 			callback = pm_op(dev->class->pm, state);
1418 			goto Run;
1419 		} else if (dev->class->suspend) {
1420 			pm_dev_dbg(dev, state, "legacy class ");
1421 			error = legacy_suspend(dev, state, dev->class->suspend,
1422 						"legacy class ");
1423 			goto End;
1424 		}
1425 	}
1426 
1427 	if (dev->bus) {
1428 		if (dev->bus->pm) {
1429 			info = "bus ";
1430 			callback = pm_op(dev->bus->pm, state);
1431 		} else if (dev->bus->suspend) {
1432 			pm_dev_dbg(dev, state, "legacy bus ");
1433 			error = legacy_suspend(dev, state, dev->bus->suspend,
1434 						"legacy bus ");
1435 			goto End;
1436 		}
1437 	}
1438 
1439  Run:
1440 	if (!callback && dev->driver && dev->driver->pm) {
1441 		info = "driver ";
1442 		callback = pm_op(dev->driver->pm, state);
1443 	}
1444 
1445 	error = dpm_run_callback(callback, dev, state, info);
1446 
1447  End:
1448 	if (!error) {
1449 		struct device *parent = dev->parent;
1450 
1451 		dev->power.is_suspended = true;
1452 		if (parent) {
1453 			spin_lock_irq(&parent->power.lock);
1454 
1455 			dev->parent->power.direct_complete = false;
1456 			if (dev->power.wakeup_path
1457 			    && !dev->parent->power.ignore_children)
1458 				dev->parent->power.wakeup_path = true;
1459 
1460 			spin_unlock_irq(&parent->power.lock);
1461 		}
1462 	}
1463 
1464 	device_unlock(dev);
1465 	dpm_watchdog_clear(&wd);
1466 
1467  Complete:
1468 	complete_all(&dev->power.completion);
1469 	if (error)
1470 		async_error = error;
1471 
1472 	TRACE_SUSPEND(error);
1473 	return error;
1474 }
1475 
1476 static void async_suspend(void *data, async_cookie_t cookie)
1477 {
1478 	struct device *dev = (struct device *)data;
1479 	int error;
1480 
1481 	error = __device_suspend(dev, pm_transition, true);
1482 	if (error) {
1483 		dpm_save_failed_dev(dev_name(dev));
1484 		pm_dev_err(dev, pm_transition, " async", error);
1485 	}
1486 
1487 	put_device(dev);
1488 }
1489 
1490 static int device_suspend(struct device *dev)
1491 {
1492 	reinit_completion(&dev->power.completion);
1493 
1494 	if (is_async(dev)) {
1495 		get_device(dev);
1496 		async_schedule(async_suspend, dev);
1497 		return 0;
1498 	}
1499 
1500 	return __device_suspend(dev, pm_transition, false);
1501 }
1502 
1503 /**
1504  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1505  * @state: PM transition of the system being carried out.
1506  */
1507 int dpm_suspend(pm_message_t state)
1508 {
1509 	ktime_t starttime = ktime_get();
1510 	int error = 0;
1511 
1512 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1513 	might_sleep();
1514 
1515 	cpufreq_suspend();
1516 
1517 	mutex_lock(&dpm_list_mtx);
1518 	pm_transition = state;
1519 	async_error = 0;
1520 	while (!list_empty(&dpm_prepared_list)) {
1521 		struct device *dev = to_device(dpm_prepared_list.prev);
1522 
1523 		get_device(dev);
1524 		mutex_unlock(&dpm_list_mtx);
1525 
1526 		error = device_suspend(dev);
1527 
1528 		mutex_lock(&dpm_list_mtx);
1529 		if (error) {
1530 			pm_dev_err(dev, state, "", error);
1531 			dpm_save_failed_dev(dev_name(dev));
1532 			put_device(dev);
1533 			break;
1534 		}
1535 		if (!list_empty(&dev->power.entry))
1536 			list_move(&dev->power.entry, &dpm_suspended_list);
1537 		put_device(dev);
1538 		if (async_error)
1539 			break;
1540 	}
1541 	mutex_unlock(&dpm_list_mtx);
1542 	async_synchronize_full();
1543 	if (!error)
1544 		error = async_error;
1545 	if (error) {
1546 		suspend_stats.failed_suspend++;
1547 		dpm_save_failed_step(SUSPEND_SUSPEND);
1548 	} else
1549 		dpm_show_time(starttime, state, NULL);
1550 	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1551 	return error;
1552 }
1553 
1554 /**
1555  * device_prepare - Prepare a device for system power transition.
1556  * @dev: Device to handle.
1557  * @state: PM transition of the system being carried out.
1558  *
1559  * Execute the ->prepare() callback(s) for given device.  No new children of the
1560  * device may be registered after this function has returned.
1561  */
1562 static int device_prepare(struct device *dev, pm_message_t state)
1563 {
1564 	int (*callback)(struct device *) = NULL;
1565 	int ret = 0;
1566 
1567 	if (dev->power.syscore)
1568 		return 0;
1569 
1570 	/*
1571 	 * If a device's parent goes into runtime suspend at the wrong time,
1572 	 * it won't be possible to resume the device.  To prevent this we
1573 	 * block runtime suspend here, during the prepare phase, and allow
1574 	 * it again during the complete phase.
1575 	 */
1576 	pm_runtime_get_noresume(dev);
1577 
1578 	device_lock(dev);
1579 
1580 	dev->power.wakeup_path = device_may_wakeup(dev);
1581 
1582 	if (dev->power.no_pm_callbacks) {
1583 		ret = 1;	/* Let device go direct_complete */
1584 		goto unlock;
1585 	}
1586 
1587 	if (dev->pm_domain)
1588 		callback = dev->pm_domain->ops.prepare;
1589 	else if (dev->type && dev->type->pm)
1590 		callback = dev->type->pm->prepare;
1591 	else if (dev->class && dev->class->pm)
1592 		callback = dev->class->pm->prepare;
1593 	else if (dev->bus && dev->bus->pm)
1594 		callback = dev->bus->pm->prepare;
1595 
1596 	if (!callback && dev->driver && dev->driver->pm)
1597 		callback = dev->driver->pm->prepare;
1598 
1599 	if (callback)
1600 		ret = callback(dev);
1601 
1602 unlock:
1603 	device_unlock(dev);
1604 
1605 	if (ret < 0) {
1606 		suspend_report_result(callback, ret);
1607 		pm_runtime_put(dev);
1608 		return ret;
1609 	}
1610 	/*
1611 	 * A positive return value from ->prepare() means "this device appears
1612 	 * to be runtime-suspended and its state is fine, so if it really is
1613 	 * runtime-suspended, you can leave it in that state provided that you
1614 	 * will do the same thing with all of its descendants".  This only
1615 	 * applies to suspend transitions, however.
1616 	 */
1617 	spin_lock_irq(&dev->power.lock);
1618 	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
1619 	spin_unlock_irq(&dev->power.lock);
1620 	return 0;
1621 }
1622 
1623 /**
1624  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1625  * @state: PM transition of the system being carried out.
1626  *
1627  * Execute the ->prepare() callback(s) for all devices.
1628  */
1629 int dpm_prepare(pm_message_t state)
1630 {
1631 	int error = 0;
1632 
1633 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1634 	might_sleep();
1635 
1636 	/*
1637 	 * Give a chance for the known devices to complete their probes, before
1638 	 * disabling device probing. This sync point is important at least
1639 	 * at boot time + hibernation restore.
1640 	 */
1641 	wait_for_device_probe();
1642 	/*
1643 	 * It is unsafe if probing of devices happens during suspend or
1644 	 * hibernation; system behavior would be unpredictable in that case.
1645 	 * So, let's prohibit device probing here and defer probes
1646 	 * instead. The normal behavior will be restored in dpm_complete().
1647 	 */
1648 	device_block_probing();
1649 
1650 	mutex_lock(&dpm_list_mtx);
1651 	while (!list_empty(&dpm_list)) {
1652 		struct device *dev = to_device(dpm_list.next);
1653 
1654 		get_device(dev);
1655 		mutex_unlock(&dpm_list_mtx);
1656 
1657 		trace_device_pm_callback_start(dev, "", state.event);
1658 		error = device_prepare(dev, state);
1659 		trace_device_pm_callback_end(dev, error);
1660 
1661 		mutex_lock(&dpm_list_mtx);
1662 		if (error) {
1663 			if (error == -EAGAIN) {
1664 				put_device(dev);
1665 				error = 0;
1666 				continue;
1667 			}
1668 			printk(KERN_INFO "PM: Device %s not prepared "
1669 				"for power transition: code %d\n",
1670 				dev_name(dev), error);
1671 			put_device(dev);
1672 			break;
1673 		}
1674 		dev->power.is_prepared = true;
1675 		if (!list_empty(&dev->power.entry))
1676 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1677 		put_device(dev);
1678 	}
1679 	mutex_unlock(&dpm_list_mtx);
1680 	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1681 	return error;
1682 }
1683 
1684 /**
1685  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1686  * @state: PM transition of the system being carried out.
1687  *
1688  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1689  * callbacks for them.
1690  */
1691 int dpm_suspend_start(pm_message_t state)
1692 {
1693 	int error;
1694 
1695 	error = dpm_prepare(state);
1696 	if (error) {
1697 		suspend_stats.failed_prepare++;
1698 		dpm_save_failed_step(SUSPEND_PREPARE);
1699 	} else
1700 		error = dpm_suspend(state);
1701 	return error;
1702 }
1703 EXPORT_SYMBOL_GPL(dpm_suspend_start);
1704 
1705 void __suspend_report_result(const char *function, void *fn, int ret)
1706 {
1707 	if (ret)
1708 		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1709 }
1710 EXPORT_SYMBOL_GPL(__suspend_report_result);
1711 
1712 /**
1713  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1714  * @dev: Device to wait for.
1715  * @subordinate: Device that needs to wait for @dev.
1716  */
1717 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1718 {
1719 	dpm_wait(dev, subordinate->power.async_suspend);
1720 	return async_error;
1721 }
1722 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1723 
1724 /**
1725  * dpm_for_each_dev - device iterator.
1726  * @data: data for the callback.
1727  * @fn: function to be called for each device.
1728  *
1729  * Iterate over devices in dpm_list, and call @fn for each device,
1730  * passing it @data.
1731  */
1732 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1733 {
1734 	struct device *dev;
1735 
1736 	if (!fn)
1737 		return;
1738 
1739 	device_pm_lock();
1740 	list_for_each_entry(dev, &dpm_list, power.entry)
1741 		fn(dev, data);
1742 	device_pm_unlock();
1743 }
1744 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
1745 
1746 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1747 {
1748 	if (!ops)
1749 		return true;
1750 
1751 	return !ops->prepare &&
1752 	       !ops->suspend &&
1753 	       !ops->suspend_late &&
1754 	       !ops->suspend_noirq &&
1755 	       !ops->resume_noirq &&
1756 	       !ops->resume_early &&
1757 	       !ops->resume &&
1758 	       !ops->complete;
1759 }
1760 
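/*
 * Detect devices with no system sleep callbacks at all; device_prepare()
 * uses the cached power.no_pm_callbacks flag to let such devices take the
 * direct_complete fast path.
 */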
1761 void device_pm_check_callbacks(struct device *dev)
1762 {
1763 	spin_lock_irq(&dev->power.lock);
1764 	dev->power.no_pm_callbacks =
1765 		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
1766 		 !dev->bus->suspend && !dev->bus->resume)) &&
1767 		(!dev->class || (pm_ops_is_empty(dev->class->pm) &&
1768 		 !dev->class->suspend && !dev->class->resume)) &&
1769 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
1770 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
1771 		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
1772 		 !dev->driver->suspend && !dev->driver->resume));
1773 	spin_unlock_irq(&dev->power.lock);
1774 }
1775