1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
11  * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
19 
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/timer.h>
36 #include <linux/wakeup_reason.h>
37 
38 #include "../base.h"
39 #include "power.h"
40 
41 typedef int (*pm_callback_t)(struct device *);
42 
43 /*
44  * The entries in dpm_list are in depth-first order, simply
45  * because children are guaranteed to be discovered after parents, and
46  * are inserted at the back of the list on discovery.
47  *
48  * Since device_pm_add() may be called with a device lock held,
49  * we must never try to acquire a device lock while holding
50  * dpm_list_mutex.
51  */
52 
53 LIST_HEAD(dpm_list);
54 static LIST_HEAD(dpm_prepared_list);
55 static LIST_HEAD(dpm_suspended_list);
56 static LIST_HEAD(dpm_late_early_list);
57 static LIST_HEAD(dpm_noirq_list);
58 
59 struct suspend_stats suspend_stats;
60 static DEFINE_MUTEX(dpm_list_mtx);
61 static pm_message_t pm_transition;
62 
63 static int async_error;
64 
65 static char *pm_verb(int event)
66 {
67 	switch (event) {
68 	case PM_EVENT_SUSPEND:
69 		return "suspend";
70 	case PM_EVENT_RESUME:
71 		return "resume";
72 	case PM_EVENT_FREEZE:
73 		return "freeze";
74 	case PM_EVENT_QUIESCE:
75 		return "quiesce";
76 	case PM_EVENT_HIBERNATE:
77 		return "hibernate";
78 	case PM_EVENT_THAW:
79 		return "thaw";
80 	case PM_EVENT_RESTORE:
81 		return "restore";
82 	case PM_EVENT_RECOVER:
83 		return "recover";
84 	default:
85 		return "(unknown PM event)";
86 	}
87 }
88 
89 /**
90  * device_pm_sleep_init - Initialize system suspend-related device fields.
91  * @dev: Device object being initialized.
92  */
93 void device_pm_sleep_init(struct device *dev)
94 {
95 	dev->power.is_prepared = false;
96 	dev->power.is_suspended = false;
97 	dev->power.is_noirq_suspended = false;
98 	dev->power.is_late_suspended = false;
99 	init_completion(&dev->power.completion);
100 	complete_all(&dev->power.completion);
101 	dev->power.wakeup = NULL;
102 	INIT_LIST_HEAD(&dev->power.entry);
103 }
104 
105 /**
106  * device_pm_lock - Lock the list of active devices used by the PM core.
107  */
108 void device_pm_lock(void)
109 {
110 	mutex_lock(&dpm_list_mtx);
111 }
112 
113 /**
114  * device_pm_unlock - Unlock the list of active devices used by the PM core.
115  */
116 void device_pm_unlock(void)
117 {
118 	mutex_unlock(&dpm_list_mtx);
119 }
120 
121 /**
122  * device_pm_add - Add a device to the PM core's list of active devices.
123  * @dev: Device to add to the list.
124  */
125 void device_pm_add(struct device *dev)
126 {
127 	pr_debug("PM: Adding info for %s:%s\n",
128 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
129 	device_pm_check_callbacks(dev);
130 	mutex_lock(&dpm_list_mtx);
131 	if (dev->parent && dev->parent->power.is_prepared)
132 		dev_warn(dev, "parent %s should not be sleeping\n",
133 			dev_name(dev->parent));
134 	list_add_tail(&dev->power.entry, &dpm_list);
135 	mutex_unlock(&dpm_list_mtx);
136 }
137 
138 /**
139  * device_pm_remove - Remove a device from the PM core's list of active devices.
140  * @dev: Device to be removed from the list.
141  */
142 void device_pm_remove(struct device *dev)
143 {
144 	pr_debug("PM: Removing info for %s:%s\n",
145 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
146 	complete_all(&dev->power.completion);
147 	mutex_lock(&dpm_list_mtx);
148 	list_del_init(&dev->power.entry);
149 	mutex_unlock(&dpm_list_mtx);
150 	device_wakeup_disable(dev);
151 	pm_runtime_remove(dev);
152 	device_pm_check_callbacks(dev);
153 }
154 
155 /**
156  * device_pm_move_before - Move device in the PM core's list of active devices.
157  * @deva: Device to move in dpm_list.
158  * @devb: Device @deva should come before.
159  */
160 void device_pm_move_before(struct device *deva, struct device *devb)
161 {
162 	pr_debug("PM: Moving %s:%s before %s:%s\n",
163 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
164 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
165 	/* Delete deva from dpm_list and reinsert before devb. */
166 	list_move_tail(&deva->power.entry, &devb->power.entry);
167 }
168 
169 /**
170  * device_pm_move_after - Move device in the PM core's list of active devices.
171  * @deva: Device to move in dpm_list.
172  * @devb: Device @deva should come after.
173  */
174 void device_pm_move_after(struct device *deva, struct device *devb)
175 {
176 	pr_debug("PM: Moving %s:%s after %s:%s\n",
177 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
178 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
179 	/* Delete deva from dpm_list and reinsert after devb. */
180 	list_move(&deva->power.entry, &devb->power.entry);
181 }
182 
183 /**
184  * device_pm_move_last - Move device to end of the PM core's list of devices.
185  * @dev: Device to move in dpm_list.
186  */
187 void device_pm_move_last(struct device *dev)
188 {
189 	pr_debug("PM: Moving %s:%s to end of list\n",
190 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
191 	list_move_tail(&dev->power.entry, &dpm_list);
192 }
193 
194 static ktime_t initcall_debug_start(struct device *dev)
195 {
196 	ktime_t calltime = ktime_set(0, 0);
197 
198 	if (pm_print_times_enabled) {
199 		pr_info("calling  %s+ @ %i, parent: %s\n",
200 			dev_name(dev), task_pid_nr(current),
201 			dev->parent ? dev_name(dev->parent) : "none");
202 		calltime = ktime_get();
203 	}
204 
205 	return calltime;
206 }
207 
208 static void initcall_debug_report(struct device *dev, ktime_t calltime,
209 				  int error, pm_message_t state, char *info)
210 {
211 	ktime_t rettime;
212 	s64 nsecs;
213 
214 	rettime = ktime_get();
215 	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
216 
217 	if (pm_print_times_enabled) {
218 		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
219 			error, (unsigned long long)nsecs >> 10);
220 	}
221 }
222 
223 /**
224  * dpm_wait - Wait for a PM operation to complete.
225  * @dev: Device to wait for.
226  * @async: If unset, wait only if the device's power.async_suspend flag is set.
227  */
228 static void dpm_wait(struct device *dev, bool async)
229 {
230 	if (!dev)
231 		return;
232 
233 	if (async || (pm_async_enabled && dev->power.async_suspend))
234 		wait_for_completion(&dev->power.completion);
235 }
236 
237 static int dpm_wait_fn(struct device *dev, void *async_ptr)
238 {
239 	dpm_wait(dev, *((bool *)async_ptr));
240 	return 0;
241 }
242 
243 static void dpm_wait_for_children(struct device *dev, bool async)
244 {
245        device_for_each_child(dev, &async, dpm_wait_fn);
246 }
247 
248 /**
249  * pm_op - Return the PM operation appropriate for given PM event.
250  * @ops: PM operations to choose from.
251  * @state: PM transition of the system being carried out.
252  */
253 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
254 {
255 	switch (state.event) {
256 #ifdef CONFIG_SUSPEND
257 	case PM_EVENT_SUSPEND:
258 		return ops->suspend;
259 	case PM_EVENT_RESUME:
260 		return ops->resume;
261 #endif /* CONFIG_SUSPEND */
262 #ifdef CONFIG_HIBERNATE_CALLBACKS
263 	case PM_EVENT_FREEZE:
264 	case PM_EVENT_QUIESCE:
265 		return ops->freeze;
266 	case PM_EVENT_HIBERNATE:
267 		return ops->poweroff;
268 	case PM_EVENT_THAW:
269 	case PM_EVENT_RECOVER:
270 		return ops->thaw;
271 		break;
272 	case PM_EVENT_RESTORE:
273 		return ops->restore;
274 #endif /* CONFIG_HIBERNATE_CALLBACKS */
275 	}
276 
277 	return NULL;
278 }
279 
280 /**
281  * pm_late_early_op - Return the PM operation appropriate for given PM event.
282  * @ops: PM operations to choose from.
283  * @state: PM transition of the system being carried out.
284  *
285  * Runtime PM is disabled for @dev while this function is being executed.
286  */
287 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
288 				      pm_message_t state)
289 {
290 	switch (state.event) {
291 #ifdef CONFIG_SUSPEND
292 	case PM_EVENT_SUSPEND:
293 		return ops->suspend_late;
294 	case PM_EVENT_RESUME:
295 		return ops->resume_early;
296 #endif /* CONFIG_SUSPEND */
297 #ifdef CONFIG_HIBERNATE_CALLBACKS
298 	case PM_EVENT_FREEZE:
299 	case PM_EVENT_QUIESCE:
300 		return ops->freeze_late;
301 	case PM_EVENT_HIBERNATE:
302 		return ops->poweroff_late;
303 	case PM_EVENT_THAW:
304 	case PM_EVENT_RECOVER:
305 		return ops->thaw_early;
306 	case PM_EVENT_RESTORE:
307 		return ops->restore_early;
308 #endif /* CONFIG_HIBERNATE_CALLBACKS */
309 	}
310 
311 	return NULL;
312 }
313 
314 /**
315  * pm_noirq_op - Return the PM operation appropriate for given PM event.
316  * @ops: PM operations to choose from.
317  * @state: PM transition of the system being carried out.
318  *
319  * The driver of @dev will not receive interrupts while this function is being
320  * executed.
321  */
322 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
323 {
324 	switch (state.event) {
325 #ifdef CONFIG_SUSPEND
326 	case PM_EVENT_SUSPEND:
327 		return ops->suspend_noirq;
328 	case PM_EVENT_RESUME:
329 		return ops->resume_noirq;
330 #endif /* CONFIG_SUSPEND */
331 #ifdef CONFIG_HIBERNATE_CALLBACKS
332 	case PM_EVENT_FREEZE:
333 	case PM_EVENT_QUIESCE:
334 		return ops->freeze_noirq;
335 	case PM_EVENT_HIBERNATE:
336 		return ops->poweroff_noirq;
337 	case PM_EVENT_THAW:
338 	case PM_EVENT_RECOVER:
339 		return ops->thaw_noirq;
340 	case PM_EVENT_RESTORE:
341 		return ops->restore_noirq;
342 #endif /* CONFIG_HIBERNATE_CALLBACKS */
343 	}
344 
345 	return NULL;
346 }
347 
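/*
 * Illustrative sketch (not from this file): the kind of struct dev_pm_ops
 * that pm_op(), pm_late_early_op() and pm_noirq_op() above pick a callback
 * from, one member per suspend/resume phase.  The foo_*() handlers are
 * hypothetical driver functions.
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend	= foo_suspend,		-- dpm_suspend() phase
 *		.suspend_late	= foo_suspend_late,	-- dpm_suspend_late() phase
 *		.suspend_noirq	= foo_suspend_noirq,	-- dpm_suspend_noirq() phase
 *		.resume_noirq	= foo_resume_noirq,	-- dpm_resume_noirq() phase
 *		.resume_early	= foo_resume_early,	-- dpm_resume_early() phase
 *		.resume		= foo_resume,		-- dpm_resume() phase
 *	};
 */
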
348 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
349 {
350 	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
351 		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
352 		", may wakeup" : "");
353 }
354 
355 static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
356 			int error)
357 {
358 	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
359 		dev_name(dev), pm_verb(state.event), info, error);
360 }
361 
362 static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
363 {
364 	ktime_t calltime;
365 	u64 usecs64;
366 	int usecs;
367 
368 	calltime = ktime_get();
369 	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
370 	do_div(usecs64, NSEC_PER_USEC);
371 	usecs = usecs64;
372 	if (usecs == 0)
373 		usecs = 1;
374 	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
375 		info ?: "", info ? " " : "", pm_verb(state.event),
376 		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
377 }
378 
379 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
380 			    pm_message_t state, char *info)
381 {
382 	ktime_t calltime;
383 	int error;
384 
385 	if (!cb)
386 		return 0;
387 
388 	calltime = initcall_debug_start(dev);
389 
390 	pm_dev_dbg(dev, state, info);
391 	trace_device_pm_callback_start(dev, info, state.event);
392 	error = cb(dev);
393 	trace_device_pm_callback_end(dev, error);
394 	suspend_report_result(cb, error);
395 
396 	initcall_debug_report(dev, calltime, error, state, info);
397 
398 	return error;
399 }
400 
401 #ifdef CONFIG_DPM_WATCHDOG
402 struct dpm_watchdog {
403 	struct device		*dev;
404 	struct task_struct	*tsk;
405 	struct timer_list	timer;
406 };
407 
408 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
409 	struct dpm_watchdog wd
410 
411 /**
412  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
413  * @data: Watchdog object address.
414  *
415  * Called when a driver has timed out suspending or resuming.
416  * There's not much we can do here to recover so panic() to
417  * capture a crash-dump in pstore.
418  */
419 static void dpm_watchdog_handler(unsigned long data)
420 {
421 	struct dpm_watchdog *wd = (void *)data;
422 
423 	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
424 	show_stack(wd->tsk, NULL);
425 	panic("%s %s: unrecoverable failure\n",
426 		dev_driver_string(wd->dev), dev_name(wd->dev));
427 }
428 
429 /**
430  * dpm_watchdog_set - Enable pm watchdog for given device.
431  * @wd: Watchdog. Must be allocated on the stack.
432  * @dev: Device to handle.
433  */
434 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
435 {
436 	struct timer_list *timer = &wd->timer;
437 
438 	wd->dev = dev;
439 	wd->tsk = current;
440 
441 	init_timer_on_stack(timer);
442 	/* use same timeout value for both suspend and resume */
443 	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
444 	timer->function = dpm_watchdog_handler;
445 	timer->data = (unsigned long)wd;
446 	add_timer(timer);
447 }
448 
449 /**
450  * dpm_watchdog_clear - Disable suspend/resume watchdog.
451  * @wd: Watchdog to disable.
452  */
453 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
454 {
455 	struct timer_list *timer = &wd->timer;
456 
457 	del_timer_sync(timer);
458 	destroy_timer_on_stack(timer);
459 }
460 #else
461 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
462 #define dpm_watchdog_set(x, y)
463 #define dpm_watchdog_clear(x)
464 #endif
465 
466 /*------------------------- Resume routines -------------------------*/
467 
468 /**
469  * device_resume_noirq - Execute a "noirq resume" callback for given device.
470  * @dev: Device to handle.
471  * @state: PM transition of the system being carried out.
472  * @async: If true, the device is being resumed asynchronously.
473  *
474  * The driver of @dev will not receive interrupts while this function is being
475  * executed.
476  */
477 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
478 {
479 	pm_callback_t callback = NULL;
480 	char *info = NULL;
481 	int error = 0;
482 
483 	TRACE_DEVICE(dev);
484 	TRACE_RESUME(0);
485 
486 	if (dev->power.syscore || dev->power.direct_complete)
487 		goto Out;
488 
489 	if (!dev->power.is_noirq_suspended)
490 		goto Out;
491 
492 	dpm_wait(dev->parent, async);
493 
494 	if (dev->pm_domain) {
495 		info = "noirq power domain ";
496 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
497 	} else if (dev->type && dev->type->pm) {
498 		info = "noirq type ";
499 		callback = pm_noirq_op(dev->type->pm, state);
500 	} else if (dev->class && dev->class->pm) {
501 		info = "noirq class ";
502 		callback = pm_noirq_op(dev->class->pm, state);
503 	} else if (dev->bus && dev->bus->pm) {
504 		info = "noirq bus ";
505 		callback = pm_noirq_op(dev->bus->pm, state);
506 	}
507 
508 	if (!callback && dev->driver && dev->driver->pm) {
509 		info = "noirq driver ";
510 		callback = pm_noirq_op(dev->driver->pm, state);
511 	}
512 
513 	error = dpm_run_callback(callback, dev, state, info);
514 	dev->power.is_noirq_suspended = false;
515 
516  Out:
517 	complete_all(&dev->power.completion);
518 	TRACE_RESUME(error);
519 	return error;
520 }
521 
522 static bool is_async(struct device *dev)
523 {
524 	return dev->power.async_suspend && pm_async_enabled
525 		&& !pm_trace_is_enabled();
526 }
527 
528 static void async_resume_noirq(void *data, async_cookie_t cookie)
529 {
530 	struct device *dev = (struct device *)data;
531 	int error;
532 
533 	error = device_resume_noirq(dev, pm_transition, true);
534 	if (error)
535 		pm_dev_err(dev, pm_transition, " async", error);
536 
537 	put_device(dev);
538 }
539 
540 /**
541  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
542  * @state: PM transition of the system being carried out.
543  *
544  * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
545  * enable device drivers to receive interrupts.
546  */
547 void dpm_resume_noirq(pm_message_t state)
548 {
549 	struct device *dev;
550 	ktime_t starttime = ktime_get();
551 
552 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
553 	mutex_lock(&dpm_list_mtx);
554 	pm_transition = state;
555 
556 	/*
557 	 * Advance the async threads upfront,
558 	 * in case the starting of async threads is
559 	 * delayed by non-async resuming devices.
560 	 */
561 	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
562 		reinit_completion(&dev->power.completion);
563 		if (is_async(dev)) {
564 			get_device(dev);
565 			async_schedule(async_resume_noirq, dev);
566 		}
567 	}
568 
569 	while (!list_empty(&dpm_noirq_list)) {
570 		dev = to_device(dpm_noirq_list.next);
571 		get_device(dev);
572 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
573 		mutex_unlock(&dpm_list_mtx);
574 
575 		if (!is_async(dev)) {
576 			int error;
577 
578 			error = device_resume_noirq(dev, state, false);
579 			if (error) {
580 				suspend_stats.failed_resume_noirq++;
581 				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
582 				dpm_save_failed_dev(dev_name(dev));
583 				pm_dev_err(dev, state, " noirq", error);
584 			}
585 		}
586 
587 		mutex_lock(&dpm_list_mtx);
588 		put_device(dev);
589 	}
590 	mutex_unlock(&dpm_list_mtx);
591 	async_synchronize_full();
592 	dpm_show_time(starttime, state, "noirq");
593 	resume_device_irqs();
594 	device_wakeup_disarm_wake_irqs();
595 	cpuidle_resume();
596 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
597 }
598 
599 /**
600  * device_resume_early - Execute an "early resume" callback for given device.
601  * @dev: Device to handle.
602  * @state: PM transition of the system being carried out.
603  * @async: If true, the device is being resumed asynchronously.
604  *
605  * Runtime PM is disabled for @dev while this function is being executed.
606  */
607 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
608 {
609 	pm_callback_t callback = NULL;
610 	char *info = NULL;
611 	int error = 0;
612 
613 	TRACE_DEVICE(dev);
614 	TRACE_RESUME(0);
615 
616 	if (dev->power.syscore || dev->power.direct_complete)
617 		goto Out;
618 
619 	if (!dev->power.is_late_suspended)
620 		goto Out;
621 
622 	dpm_wait(dev->parent, async);
623 
624 	if (dev->pm_domain) {
625 		info = "early power domain ";
626 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
627 	} else if (dev->type && dev->type->pm) {
628 		info = "early type ";
629 		callback = pm_late_early_op(dev->type->pm, state);
630 	} else if (dev->class && dev->class->pm) {
631 		info = "early class ";
632 		callback = pm_late_early_op(dev->class->pm, state);
633 	} else if (dev->bus && dev->bus->pm) {
634 		info = "early bus ";
635 		callback = pm_late_early_op(dev->bus->pm, state);
636 	}
637 
638 	if (!callback && dev->driver && dev->driver->pm) {
639 		info = "early driver ";
640 		callback = pm_late_early_op(dev->driver->pm, state);
641 	}
642 
643 	error = dpm_run_callback(callback, dev, state, info);
644 	dev->power.is_late_suspended = false;
645 
646  Out:
647 	TRACE_RESUME(error);
648 
649 	pm_runtime_enable(dev);
650 	complete_all(&dev->power.completion);
651 	return error;
652 }
653 
654 static void async_resume_early(void *data, async_cookie_t cookie)
655 {
656 	struct device *dev = (struct device *)data;
657 	int error;
658 
659 	error = device_resume_early(dev, pm_transition, true);
660 	if (error)
661 		pm_dev_err(dev, pm_transition, " async", error);
662 
663 	put_device(dev);
664 }
665 
666 /**
667  * dpm_resume_early - Execute "early resume" callbacks for all devices.
668  * @state: PM transition of the system being carried out.
669  */
670 void dpm_resume_early(pm_message_t state)
671 {
672 	struct device *dev;
673 	ktime_t starttime = ktime_get();
674 
675 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
676 	mutex_lock(&dpm_list_mtx);
677 	pm_transition = state;
678 
679 	/*
680 	 * Advanced the async threads upfront,
681 	 * in case the starting of async threads is
682 	 * delayed by non-async resuming devices.
683 	 */
684 	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
685 		reinit_completion(&dev->power.completion);
686 		if (is_async(dev)) {
687 			get_device(dev);
688 			async_schedule(async_resume_early, dev);
689 		}
690 	}
691 
692 	while (!list_empty(&dpm_late_early_list)) {
693 		dev = to_device(dpm_late_early_list.next);
694 		get_device(dev);
695 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
696 		mutex_unlock(&dpm_list_mtx);
697 
698 		if (!is_async(dev)) {
699 			int error;
700 
701 			error = device_resume_early(dev, state, false);
702 			if (error) {
703 				suspend_stats.failed_resume_early++;
704 				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
705 				dpm_save_failed_dev(dev_name(dev));
706 				pm_dev_err(dev, state, " early", error);
707 			}
708 		}
709 		mutex_lock(&dpm_list_mtx);
710 		put_device(dev);
711 	}
712 	mutex_unlock(&dpm_list_mtx);
713 	async_synchronize_full();
714 	dpm_show_time(starttime, state, "early");
715 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
716 }
717 
718 /**
719  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
720  * @state: PM transition of the system being carried out.
721  */
722 void dpm_resume_start(pm_message_t state)
723 {
724 	dpm_resume_noirq(state);
725 	dpm_resume_early(state);
726 }
727 EXPORT_SYMBOL_GPL(dpm_resume_start);
728 
729 /**
730  * device_resume - Execute "resume" callbacks for given device.
731  * @dev: Device to handle.
732  * @state: PM transition of the system being carried out.
733  * @async: If true, the device is being resumed asynchronously.
734  */
735 static int device_resume(struct device *dev, pm_message_t state, bool async)
736 {
737 	pm_callback_t callback = NULL;
738 	char *info = NULL;
739 	int error = 0;
740 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
741 
742 	TRACE_DEVICE(dev);
743 	TRACE_RESUME(0);
744 
745 	if (dev->power.syscore)
746 		goto Complete;
747 
748 	if (dev->power.direct_complete) {
749 		/* Match the pm_runtime_disable() in __device_suspend(). */
750 		pm_runtime_enable(dev);
751 		goto Complete;
752 	}
753 
754 	dpm_wait(dev->parent, async);
755 	dpm_watchdog_set(&wd, dev);
756 	device_lock(dev);
757 
758 	/*
759 	 * This is a fib.  But we'll allow new children to be added below
760 	 * a resumed device, even if the device hasn't been completed yet.
761 	 */
762 	dev->power.is_prepared = false;
763 
764 	if (!dev->power.is_suspended)
765 		goto Unlock;
766 
767 	if (dev->pm_domain) {
768 		info = "power domain ";
769 		callback = pm_op(&dev->pm_domain->ops, state);
770 		goto Driver;
771 	}
772 
773 	if (dev->type && dev->type->pm) {
774 		info = "type ";
775 		callback = pm_op(dev->type->pm, state);
776 		goto Driver;
777 	}
778 
779 	if (dev->class) {
780 		if (dev->class->pm) {
781 			info = "class ";
782 			callback = pm_op(dev->class->pm, state);
783 			goto Driver;
784 		} else if (dev->class->resume) {
785 			info = "legacy class ";
786 			callback = dev->class->resume;
787 			goto End;
788 		}
789 	}
790 
791 	if (dev->bus) {
792 		if (dev->bus->pm) {
793 			info = "bus ";
794 			callback = pm_op(dev->bus->pm, state);
795 		} else if (dev->bus->resume) {
796 			info = "legacy bus ";
797 			callback = dev->bus->resume;
798 			goto End;
799 		}
800 	}
801 
802  Driver:
803 	if (!callback && dev->driver && dev->driver->pm) {
804 		info = "driver ";
805 		callback = pm_op(dev->driver->pm, state);
806 	}
807 
808  End:
809 	error = dpm_run_callback(callback, dev, state, info);
810 	dev->power.is_suspended = false;
811 
812  Unlock:
813 	device_unlock(dev);
814 	dpm_watchdog_clear(&wd);
815 
816  Complete:
817 	complete_all(&dev->power.completion);
818 
819 	TRACE_RESUME(error);
820 
821 	return error;
822 }
823 
824 static void async_resume(void *data, async_cookie_t cookie)
825 {
826 	struct device *dev = (struct device *)data;
827 	int error;
828 
829 	error = device_resume(dev, pm_transition, true);
830 	if (error)
831 		pm_dev_err(dev, pm_transition, " async", error);
832 	put_device(dev);
833 }
834 
835 /**
836  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
837  * @state: PM transition of the system being carried out.
838  *
839  * Execute the appropriate "resume" callback for all devices whose status
840  * indicates that they are suspended.
841  */
842 void dpm_resume(pm_message_t state)
843 {
844 	struct device *dev;
845 	ktime_t starttime = ktime_get();
846 
847 	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
848 	might_sleep();
849 
850 	mutex_lock(&dpm_list_mtx);
851 	pm_transition = state;
852 	async_error = 0;
853 
854 	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
855 		reinit_completion(&dev->power.completion);
856 		if (is_async(dev)) {
857 			get_device(dev);
858 			async_schedule(async_resume, dev);
859 		}
860 	}
861 
862 	while (!list_empty(&dpm_suspended_list)) {
863 		dev = to_device(dpm_suspended_list.next);
864 		get_device(dev);
865 		if (!is_async(dev)) {
866 			int error;
867 
868 			mutex_unlock(&dpm_list_mtx);
869 
870 			error = device_resume(dev, state, false);
871 			if (error) {
872 				suspend_stats.failed_resume++;
873 				dpm_save_failed_step(SUSPEND_RESUME);
874 				dpm_save_failed_dev(dev_name(dev));
875 				pm_dev_err(dev, state, "", error);
876 			}
877 
878 			mutex_lock(&dpm_list_mtx);
879 		}
880 		if (!list_empty(&dev->power.entry))
881 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
882 		put_device(dev);
883 	}
884 	mutex_unlock(&dpm_list_mtx);
885 	async_synchronize_full();
886 	dpm_show_time(starttime, state, NULL);
887 
888 	cpufreq_resume();
889 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
890 }
891 
892 /**
893  * device_complete - Complete a PM transition for given device.
894  * @dev: Device to handle.
895  * @state: PM transition of the system being carried out.
896  */
897 static void device_complete(struct device *dev, pm_message_t state)
898 {
899 	void (*callback)(struct device *) = NULL;
900 	char *info = NULL;
901 
902 	if (dev->power.syscore)
903 		return;
904 
905 	device_lock(dev);
906 
907 	if (dev->pm_domain) {
908 		info = "completing power domain ";
909 		callback = dev->pm_domain->ops.complete;
910 	} else if (dev->type && dev->type->pm) {
911 		info = "completing type ";
912 		callback = dev->type->pm->complete;
913 	} else if (dev->class && dev->class->pm) {
914 		info = "completing class ";
915 		callback = dev->class->pm->complete;
916 	} else if (dev->bus && dev->bus->pm) {
917 		info = "completing bus ";
918 		callback = dev->bus->pm->complete;
919 	}
920 
921 	if (!callback && dev->driver && dev->driver->pm) {
922 		info = "completing driver ";
923 		callback = dev->driver->pm->complete;
924 	}
925 
926 	if (callback) {
927 		pm_dev_dbg(dev, state, info);
928 		callback(dev);
929 	}
930 
931 	device_unlock(dev);
932 
933 	pm_runtime_put(dev);
934 }
935 
936 /**
937  * dpm_complete - Complete a PM transition for all non-sysdev devices.
938  * @state: PM transition of the system being carried out.
939  *
940  * Execute the ->complete() callbacks for all devices whose PM status is not
941  * DPM_ON (this allows new devices to be registered).
942  */
943 void dpm_complete(pm_message_t state)
944 {
945 	struct list_head list;
946 
947 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
948 	might_sleep();
949 
950 	INIT_LIST_HEAD(&list);
951 	mutex_lock(&dpm_list_mtx);
952 	while (!list_empty(&dpm_prepared_list)) {
953 		struct device *dev = to_device(dpm_prepared_list.prev);
954 
955 		get_device(dev);
956 		dev->power.is_prepared = false;
957 		list_move(&dev->power.entry, &list);
958 		mutex_unlock(&dpm_list_mtx);
959 
960 		trace_device_pm_callback_start(dev, "", state.event);
961 		device_complete(dev, state);
962 		trace_device_pm_callback_end(dev, 0);
963 
964 		mutex_lock(&dpm_list_mtx);
965 		put_device(dev);
966 	}
967 	list_splice(&list, &dpm_list);
968 	mutex_unlock(&dpm_list_mtx);
969 	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
970 }
971 
972 /**
973  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
974  * @state: PM transition of the system being carried out.
975  *
976  * Execute "resume" callbacks for all devices and complete the PM transition of
977  * the system.
978  */
979 void dpm_resume_end(pm_message_t state)
980 {
981 	dpm_resume(state);
982 	dpm_complete(state);
983 }
984 EXPORT_SYMBOL_GPL(dpm_resume_end);
985 
986 
987 /*------------------------- Suspend routines -------------------------*/
988 
989 /**
990  * resume_event - Return a "resume" message for given "suspend" sleep state.
991  * @sleep_state: PM message representing a sleep state.
992  *
993  * Return a PM message representing the resume event corresponding to given
994  * sleep state.
995  */
996 static pm_message_t resume_event(pm_message_t sleep_state)
997 {
998 	switch (sleep_state.event) {
999 	case PM_EVENT_SUSPEND:
1000 		return PMSG_RESUME;
1001 	case PM_EVENT_FREEZE:
1002 	case PM_EVENT_QUIESCE:
1003 		return PMSG_RECOVER;
1004 	case PM_EVENT_HIBERNATE:
1005 		return PMSG_RESTORE;
1006 	}
1007 	return PMSG_ON;
1008 }
1009 
1010 /**
1011  * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1012  * @dev: Device to handle.
1013  * @state: PM transition of the system being carried out.
1014  * @async: If true, the device is being suspended asynchronously.
1015  *
1016  * The driver of @dev will not receive interrupts while this function is being
1017  * executed.
1018  */
1019 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1020 {
1021 	pm_callback_t callback = NULL;
1022 	char *info = NULL;
1023 	int error = 0;
1024 
1025 	TRACE_DEVICE(dev);
1026 	TRACE_SUSPEND(0);
1027 
1028 	dpm_wait_for_children(dev, async);
1029 
1030 	if (async_error)
1031 		goto Complete;
1032 
1033 	if (pm_wakeup_pending()) {
1034 		async_error = -EBUSY;
1035 		goto Complete;
1036 	}
1037 
1038 	if (dev->power.syscore || dev->power.direct_complete)
1039 		goto Complete;
1040 
1041 	if (dev->pm_domain) {
1042 		info = "noirq power domain ";
1043 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1044 	} else if (dev->type && dev->type->pm) {
1045 		info = "noirq type ";
1046 		callback = pm_noirq_op(dev->type->pm, state);
1047 	} else if (dev->class && dev->class->pm) {
1048 		info = "noirq class ";
1049 		callback = pm_noirq_op(dev->class->pm, state);
1050 	} else if (dev->bus && dev->bus->pm) {
1051 		info = "noirq bus ";
1052 		callback = pm_noirq_op(dev->bus->pm, state);
1053 	}
1054 
1055 	if (!callback && dev->driver && dev->driver->pm) {
1056 		info = "noirq driver ";
1057 		callback = pm_noirq_op(dev->driver->pm, state);
1058 	}
1059 
1060 	error = dpm_run_callback(callback, dev, state, info);
1061 	if (!error)
1062 		dev->power.is_noirq_suspended = true;
1063 	else
1064 		async_error = error;
1065 
1066 Complete:
1067 	complete_all(&dev->power.completion);
1068 	TRACE_SUSPEND(error);
1069 	return error;
1070 }
1071 
1072 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1073 {
1074 	struct device *dev = (struct device *)data;
1075 	int error;
1076 
1077 	error = __device_suspend_noirq(dev, pm_transition, true);
1078 	if (error) {
1079 		dpm_save_failed_dev(dev_name(dev));
1080 		pm_dev_err(dev, pm_transition, " async", error);
1081 	}
1082 
1083 	put_device(dev);
1084 }
1085 
1086 static int device_suspend_noirq(struct device *dev)
1087 {
1088 	reinit_completion(&dev->power.completion);
1089 
1090 	if (is_async(dev)) {
1091 		get_device(dev);
1092 		async_schedule(async_suspend_noirq, dev);
1093 		return 0;
1094 	}
1095 	return __device_suspend_noirq(dev, pm_transition, false);
1096 }
1097 
1098 /**
1099  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1100  * @state: PM transition of the system being carried out.
1101  *
1102  * Prevent device drivers from receiving interrupts and call the "noirq" suspend
1103  * handlers for all non-sysdev devices.
1104  */
1105 int dpm_suspend_noirq(pm_message_t state)
1106 {
1107 	ktime_t starttime = ktime_get();
1108 	int error = 0;
1109 
1110 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1111 	cpuidle_pause();
1112 	device_wakeup_arm_wake_irqs();
1113 	suspend_device_irqs();
1114 	mutex_lock(&dpm_list_mtx);
1115 	pm_transition = state;
1116 	async_error = 0;
1117 
1118 	while (!list_empty(&dpm_late_early_list)) {
1119 		struct device *dev = to_device(dpm_late_early_list.prev);
1120 
1121 		get_device(dev);
1122 		mutex_unlock(&dpm_list_mtx);
1123 
1124 		error = device_suspend_noirq(dev);
1125 
1126 		mutex_lock(&dpm_list_mtx);
1127 		if (error) {
1128 			pm_dev_err(dev, state, " noirq", error);
1129 			dpm_save_failed_dev(dev_name(dev));
1130 			put_device(dev);
1131 			break;
1132 		}
1133 		if (!list_empty(&dev->power.entry))
1134 			list_move(&dev->power.entry, &dpm_noirq_list);
1135 		put_device(dev);
1136 
1137 		if (async_error)
1138 			break;
1139 	}
1140 	mutex_unlock(&dpm_list_mtx);
1141 	async_synchronize_full();
1142 	if (!error)
1143 		error = async_error;
1144 
1145 	if (error) {
1146 		suspend_stats.failed_suspend_noirq++;
1147 		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1148 		dpm_resume_noirq(resume_event(state));
1149 	} else {
1150 		dpm_show_time(starttime, state, "noirq");
1151 	}
1152 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1153 	return error;
1154 }
1155 
1156 /**
1157  * device_suspend_late - Execute a "late suspend" callback for given device.
1158  * @dev: Device to handle.
1159  * @state: PM transition of the system being carried out.
1160  * @async: If true, the device is being suspended asynchronously.
1161  *
1162  * Runtime PM is disabled for @dev while this function is being executed.
1163  */
1164 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1165 {
1166 	pm_callback_t callback = NULL;
1167 	char *info = NULL;
1168 	int error = 0;
1169 
1170 	TRACE_DEVICE(dev);
1171 	TRACE_SUSPEND(0);
1172 
1173 	__pm_runtime_disable(dev, false);
1174 
1175 	dpm_wait_for_children(dev, async);
1176 
1177 	if (async_error)
1178 		goto Complete;
1179 
1180 	if (pm_wakeup_pending()) {
1181 		async_error = -EBUSY;
1182 		goto Complete;
1183 	}
1184 
1185 	if (dev->power.syscore || dev->power.direct_complete)
1186 		goto Complete;
1187 
1188 	if (dev->pm_domain) {
1189 		info = "late power domain ";
1190 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1191 	} else if (dev->type && dev->type->pm) {
1192 		info = "late type ";
1193 		callback = pm_late_early_op(dev->type->pm, state);
1194 	} else if (dev->class && dev->class->pm) {
1195 		info = "late class ";
1196 		callback = pm_late_early_op(dev->class->pm, state);
1197 	} else if (dev->bus && dev->bus->pm) {
1198 		info = "late bus ";
1199 		callback = pm_late_early_op(dev->bus->pm, state);
1200 	}
1201 
1202 	if (!callback && dev->driver && dev->driver->pm) {
1203 		info = "late driver ";
1204 		callback = pm_late_early_op(dev->driver->pm, state);
1205 	}
1206 
1207 	error = dpm_run_callback(callback, dev, state, info);
1208 	if (!error)
1209 		dev->power.is_late_suspended = true;
1210 	else
1211 		async_error = error;
1212 
1213 Complete:
1214 	TRACE_SUSPEND(error);
1215 	complete_all(&dev->power.completion);
1216 	return error;
1217 }
1218 
1219 static void async_suspend_late(void *data, async_cookie_t cookie)
1220 {
1221 	struct device *dev = (struct device *)data;
1222 	int error;
1223 
1224 	error = __device_suspend_late(dev, pm_transition, true);
1225 	if (error) {
1226 		dpm_save_failed_dev(dev_name(dev));
1227 		pm_dev_err(dev, pm_transition, " async", error);
1228 	}
1229 	put_device(dev);
1230 }
1231 
1232 static int device_suspend_late(struct device *dev)
1233 {
1234 	reinit_completion(&dev->power.completion);
1235 
1236 	if (is_async(dev)) {
1237 		get_device(dev);
1238 		async_schedule(async_suspend_late, dev);
1239 		return 0;
1240 	}
1241 
1242 	return __device_suspend_late(dev, pm_transition, false);
1243 }
1244 
1245 /**
1246  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1247  * @state: PM transition of the system being carried out.
1248  */
1249 int dpm_suspend_late(pm_message_t state)
1250 {
1251 	ktime_t starttime = ktime_get();
1252 	int error = 0;
1253 
1254 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1255 	mutex_lock(&dpm_list_mtx);
1256 	pm_transition = state;
1257 	async_error = 0;
1258 
1259 	while (!list_empty(&dpm_suspended_list)) {
1260 		struct device *dev = to_device(dpm_suspended_list.prev);
1261 
1262 		get_device(dev);
1263 		mutex_unlock(&dpm_list_mtx);
1264 
1265 		error = device_suspend_late(dev);
1266 
1267 		mutex_lock(&dpm_list_mtx);
1268 		if (!list_empty(&dev->power.entry))
1269 			list_move(&dev->power.entry, &dpm_late_early_list);
1270 
1271 		if (error) {
1272 			pm_dev_err(dev, state, " late", error);
1273 			dpm_save_failed_dev(dev_name(dev));
1274 			put_device(dev);
1275 			break;
1276 		}
1277 		put_device(dev);
1278 
1279 		if (async_error)
1280 			break;
1281 	}
1282 	mutex_unlock(&dpm_list_mtx);
1283 	async_synchronize_full();
1284 	if (!error)
1285 		error = async_error;
1286 	if (error) {
1287 		suspend_stats.failed_suspend_late++;
1288 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1289 		dpm_resume_early(resume_event(state));
1290 	} else {
1291 		dpm_show_time(starttime, state, "late");
1292 	}
1293 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1294 	return error;
1295 }
1296 
1297 /**
1298  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1299  * @state: PM transition of the system being carried out.
1300  */
1301 int dpm_suspend_end(pm_message_t state)
1302 {
1303 	int error = dpm_suspend_late(state);
1304 	if (error)
1305 		return error;
1306 
1307 	error = dpm_suspend_noirq(state);
1308 	if (error) {
1309 		dpm_resume_early(resume_event(state));
1310 		return error;
1311 	}
1312 
1313 	return 0;
1314 }
1315 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1316 
1317 /**
1318  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1319  * @dev: Device to suspend.
1320  * @state: PM transition of the system being carried out.
1321  * @cb: Suspend callback to execute.
1322  * @info: string description of caller.
1323  */
1324 static int legacy_suspend(struct device *dev, pm_message_t state,
1325 			  int (*cb)(struct device *dev, pm_message_t state),
1326 			  char *info)
1327 {
1328 	int error;
1329 	ktime_t calltime;
1330 
1331 	calltime = initcall_debug_start(dev);
1332 
1333 	trace_device_pm_callback_start(dev, info, state.event);
1334 	error = cb(dev, state);
1335 	trace_device_pm_callback_end(dev, error);
1336 	suspend_report_result(cb, error);
1337 
1338 	initcall_debug_report(dev, calltime, error, state, info);
1339 
1340 	return error;
1341 }
1342 
1343 /**
1344  * device_suspend - Execute "suspend" callbacks for given device.
1345  * @dev: Device to handle.
1346  * @state: PM transition of the system being carried out.
1347  * @async: If true, the device is being suspended asynchronously.
1348  */
1349 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1350 {
1351 	pm_callback_t callback = NULL;
1352 	char *info = NULL;
1353 	int error = 0;
1354 	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
1355 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1356 
1357 	TRACE_DEVICE(dev);
1358 	TRACE_SUSPEND(0);
1359 
1360 	dpm_wait_for_children(dev, async);
1361 
1362 	if (async_error) {
1363 		dev->power.direct_complete = false;
1364 		goto Complete;
1365 	}
1366 
1367 	/*
1368 	 * Wait for possible runtime PM transitions of the device in progress
1369 	 * to complete and if there's a runtime resume request pending for it,
1370 	 * resume it before proceeding with invoking the system-wide suspend
1371 	 * callbacks for it.
1372 	 *
1373 	 * If the system-wide suspend callbacks below change the configuration
1374 	 * of the device, they must disable runtime PM for it or otherwise
1375 	 * ensure that its runtime-resume callbacks will not be confused by that
1376 	 * change in case they are invoked going forward.
1377 	 */
1378 	pm_runtime_barrier(dev);
1379 
1380 	if (pm_wakeup_pending()) {
1381 		pm_get_active_wakeup_sources(suspend_abort,
1382 			MAX_SUSPEND_ABORT_LEN);
1383 		log_suspend_abort_reason(suspend_abort);
1384 		dev->power.direct_complete = false;
1385 		async_error = -EBUSY;
1386 		goto Complete;
1387 	}
1388 
1389 	if (dev->power.syscore)
1390 		goto Complete;
1391 
1392 	/* Avoid direct_complete to let wakeup_path propagate. */
1393 	if (device_may_wakeup(dev) || dev->power.wakeup_path)
1394 		dev->power.direct_complete = false;
1395 
1396 	if (dev->power.direct_complete) {
1397 		if (pm_runtime_status_suspended(dev)) {
1398 			pm_runtime_disable(dev);
1399 			if (pm_runtime_status_suspended(dev))
1400 				goto Complete;
1401 
1402 			pm_runtime_enable(dev);
1403 		}
1404 		dev->power.direct_complete = false;
1405 	}
1406 
1407 	dpm_watchdog_set(&wd, dev);
1408 	device_lock(dev);
1409 
1410 	if (dev->pm_domain) {
1411 		info = "power domain ";
1412 		callback = pm_op(&dev->pm_domain->ops, state);
1413 		goto Run;
1414 	}
1415 
1416 	if (dev->type && dev->type->pm) {
1417 		info = "type ";
1418 		callback = pm_op(dev->type->pm, state);
1419 		goto Run;
1420 	}
1421 
1422 	if (dev->class) {
1423 		if (dev->class->pm) {
1424 			info = "class ";
1425 			callback = pm_op(dev->class->pm, state);
1426 			goto Run;
1427 		} else if (dev->class->suspend) {
1428 			pm_dev_dbg(dev, state, "legacy class ");
1429 			error = legacy_suspend(dev, state, dev->class->suspend,
1430 						"legacy class ");
1431 			goto End;
1432 		}
1433 	}
1434 
1435 	if (dev->bus) {
1436 		if (dev->bus->pm) {
1437 			info = "bus ";
1438 			callback = pm_op(dev->bus->pm, state);
1439 		} else if (dev->bus->suspend) {
1440 			pm_dev_dbg(dev, state, "legacy bus ");
1441 			error = legacy_suspend(dev, state, dev->bus->suspend,
1442 						"legacy bus ");
1443 			goto End;
1444 		}
1445 	}
1446 
1447  Run:
1448 	if (!callback && dev->driver && dev->driver->pm) {
1449 		info = "driver ";
1450 		callback = pm_op(dev->driver->pm, state);
1451 	}
1452 
1453 	error = dpm_run_callback(callback, dev, state, info);
1454 
1455  End:
1456 	if (!error) {
1457 		struct device *parent = dev->parent;
1458 
1459 		dev->power.is_suspended = true;
1460 		if (parent) {
1461 			spin_lock_irq(&parent->power.lock);
1462 
1463 			dev->parent->power.direct_complete = false;
1464 			if (dev->power.wakeup_path
1465 			    && !dev->parent->power.ignore_children)
1466 				dev->parent->power.wakeup_path = true;
1467 
1468 			spin_unlock_irq(&parent->power.lock);
1469 		}
1470 	}
1471 
1472 	device_unlock(dev);
1473 	dpm_watchdog_clear(&wd);
1474 
1475  Complete:
1476 	complete_all(&dev->power.completion);
1477 	if (error)
1478 		async_error = error;
1479 
1480 	TRACE_SUSPEND(error);
1481 	return error;
1482 }
1483 
1484 static void async_suspend(void *data, async_cookie_t cookie)
1485 {
1486 	struct device *dev = (struct device *)data;
1487 	int error;
1488 
1489 	error = __device_suspend(dev, pm_transition, true);
1490 	if (error) {
1491 		dpm_save_failed_dev(dev_name(dev));
1492 		pm_dev_err(dev, pm_transition, " async", error);
1493 	}
1494 
1495 	put_device(dev);
1496 }
1497 
1498 static int device_suspend(struct device *dev)
1499 {
1500 	reinit_completion(&dev->power.completion);
1501 
1502 	if (is_async(dev)) {
1503 		get_device(dev);
1504 		async_schedule(async_suspend, dev);
1505 		return 0;
1506 	}
1507 
1508 	return __device_suspend(dev, pm_transition, false);
1509 }
1510 
1511 /**
1512  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1513  * @state: PM transition of the system being carried out.
1514  */
1515 int dpm_suspend(pm_message_t state)
1516 {
1517 	ktime_t starttime = ktime_get();
1518 	int error = 0;
1519 
1520 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1521 	might_sleep();
1522 
1523 	cpufreq_suspend();
1524 
1525 	mutex_lock(&dpm_list_mtx);
1526 	pm_transition = state;
1527 	async_error = 0;
1528 	while (!list_empty(&dpm_prepared_list)) {
1529 		struct device *dev = to_device(dpm_prepared_list.prev);
1530 
1531 		get_device(dev);
1532 		mutex_unlock(&dpm_list_mtx);
1533 
1534 		error = device_suspend(dev);
1535 
1536 		mutex_lock(&dpm_list_mtx);
1537 		if (error) {
1538 			pm_dev_err(dev, state, "", error);
1539 			dpm_save_failed_dev(dev_name(dev));
1540 			put_device(dev);
1541 			break;
1542 		}
1543 		if (!list_empty(&dev->power.entry))
1544 			list_move(&dev->power.entry, &dpm_suspended_list);
1545 		put_device(dev);
1546 		if (async_error)
1547 			break;
1548 	}
1549 	mutex_unlock(&dpm_list_mtx);
1550 	async_synchronize_full();
1551 	if (!error)
1552 		error = async_error;
1553 	if (error) {
1554 		suspend_stats.failed_suspend++;
1555 		dpm_save_failed_step(SUSPEND_SUSPEND);
1556 	} else
1557 		dpm_show_time(starttime, state, NULL);
1558 	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1559 	return error;
1560 }
1561 
1562 /**
1563  * device_prepare - Prepare a device for system power transition.
1564  * @dev: Device to handle.
1565  * @state: PM transition of the system being carried out.
1566  *
1567  * Execute the ->prepare() callback(s) for given device.  No new children of the
1568  * device may be registered after this function has returned.
1569  */
1570 static int device_prepare(struct device *dev, pm_message_t state)
1571 {
1572 	int (*callback)(struct device *) = NULL;
1573 	char *info = NULL;
1574 	int ret = 0;
1575 
1576 	if (dev->power.syscore)
1577 		return 0;
1578 
1579 	/*
1580 	 * If a device's parent goes into runtime suspend at the wrong time,
1581 	 * it won't be possible to resume the device.  To prevent this we
1582 	 * block runtime suspend here, during the prepare phase, and allow
1583 	 * it again during the complete phase.
1584 	 */
1585 	pm_runtime_get_noresume(dev);
1586 
1587 	device_lock(dev);
1588 
1589 	dev->power.wakeup_path = device_may_wakeup(dev);
1590 
1591 	if (dev->power.no_pm_callbacks) {
1592 		ret = 1;	/* Let device go direct_complete */
1593 		goto unlock;
1594 	}
1595 
1596 	if (dev->pm_domain) {
1597 		info = "preparing power domain ";
1598 		callback = dev->pm_domain->ops.prepare;
1599 	} else if (dev->type && dev->type->pm) {
1600 		info = "preparing type ";
1601 		callback = dev->type->pm->prepare;
1602 	} else if (dev->class && dev->class->pm) {
1603 		info = "preparing class ";
1604 		callback = dev->class->pm->prepare;
1605 	} else if (dev->bus && dev->bus->pm) {
1606 		info = "preparing bus ";
1607 		callback = dev->bus->pm->prepare;
1608 	}
1609 
1610 	if (!callback && dev->driver && dev->driver->pm) {
1611 		info = "preparing driver ";
1612 		callback = dev->driver->pm->prepare;
1613 	}
1614 
1615 	if (callback)
1616 		ret = callback(dev);
1617 
1618 unlock:
1619 	device_unlock(dev);
1620 
1621 	if (ret < 0) {
1622 		suspend_report_result(callback, ret);
1623 		pm_runtime_put(dev);
1624 		return ret;
1625 	}
1626 	/*
1627 	 * A positive return value from ->prepare() means "this device appears
1628 	 * to be runtime-suspended and its state is fine, so if it really is
1629 	 * runtime-suspended, you can leave it in that state provided that you
1630 	 * will do the same thing with all of its descendants".  This only
1631 	 * applies to suspend transitions, however.
1632 	 */
1633 	spin_lock_irq(&dev->power.lock);
1634 	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
1635 	spin_unlock_irq(&dev->power.lock);
1636 	return 0;
1637 }
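
/*
 * Illustrative sketch (not from this file): a ->prepare() callback that
 * returns a positive value to opt a runtime-suspended device into the
 * direct_complete path described above.  foo_prepare() is a hypothetical
 * driver callback.
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev) ? 1 : 0;
 *	}
 */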
1638 
1639 /**
1640  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1641  * @state: PM transition of the system being carried out.
1642  *
1643  * Execute the ->prepare() callback(s) for all devices.
1644  */
1645 int dpm_prepare(pm_message_t state)
1646 {
1647 	int error = 0;
1648 
1649 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1650 	might_sleep();
1651 
1652 	mutex_lock(&dpm_list_mtx);
1653 	while (!list_empty(&dpm_list)) {
1654 		struct device *dev = to_device(dpm_list.next);
1655 
1656 		get_device(dev);
1657 		mutex_unlock(&dpm_list_mtx);
1658 
1659 		trace_device_pm_callback_start(dev, "", state.event);
1660 		error = device_prepare(dev, state);
1661 		trace_device_pm_callback_end(dev, error);
1662 
1663 		mutex_lock(&dpm_list_mtx);
1664 		if (error) {
1665 			if (error == -EAGAIN) {
1666 				put_device(dev);
1667 				error = 0;
1668 				continue;
1669 			}
1670 			printk(KERN_INFO "PM: Device %s not prepared "
1671 				"for power transition: code %d\n",
1672 				dev_name(dev), error);
1673 			put_device(dev);
1674 			break;
1675 		}
1676 		dev->power.is_prepared = true;
1677 		if (!list_empty(&dev->power.entry))
1678 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1679 		put_device(dev);
1680 	}
1681 	mutex_unlock(&dpm_list_mtx);
1682 	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1683 	return error;
1684 }
1685 
1686 /**
1687  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1688  * @state: PM transition of the system being carried out.
1689  *
1690  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1691  * callbacks for them.
1692  */
1693 int dpm_suspend_start(pm_message_t state)
1694 {
1695 	int error;
1696 
1697 	error = dpm_prepare(state);
1698 	if (error) {
1699 		suspend_stats.failed_prepare++;
1700 		dpm_save_failed_step(SUSPEND_PREPARE);
1701 	} else
1702 		error = dpm_suspend(state);
1703 	return error;
1704 }
1705 EXPORT_SYMBOL_GPL(dpm_suspend_start);
1706 
1707 void __suspend_report_result(const char *function, void *fn, int ret)
1708 {
1709 	if (ret)
1710 		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1711 }
1712 EXPORT_SYMBOL_GPL(__suspend_report_result);
1713 
1714 /**
1715  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1716  * @dev: Device to wait for.
1717  * @subordinate: Device that needs to wait for @dev.
1718  */
1719 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1720 {
1721 	dpm_wait(dev, subordinate->power.async_suspend);
1722 	return async_error;
1723 }
1724 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
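
/*
 * Illustrative sketch (not from this file): a driver whose suspend must wait
 * until the PM core has finished suspending a companion device can order
 * itself with device_pm_wait_for_dev().  struct foo, foo->companion and
 * foo_do_suspend() are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->companion);
 *		if (error)
 *			return error;
 *
 *		return foo_do_suspend(foo);
 *	}
 */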
1725 
1726 /**
1727  * dpm_for_each_dev - device iterator.
1728  * @data: data for the callback.
1729  * @fn: function to be called for each device.
1730  *
1731  * Iterate over devices in dpm_list, and call @fn for each device,
1732  * passing it @data.
1733  */
1734 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1735 {
1736 	struct device *dev;
1737 
1738 	if (!fn)
1739 		return;
1740 
1741 	device_pm_lock();
1742 	list_for_each_entry(dev, &dpm_list, power.entry)
1743 		fn(dev, data);
1744 	device_pm_unlock();
1745 }
1746 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
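
/*
 * Illustrative sketch (not from this file): counting the devices on dpm_list
 * with dpm_for_each_dev().  count_dev() and the counter are hypothetical.
 *
 *	static void count_dev(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *	}
 *
 *	unsigned int n = 0;
 *	dpm_for_each_dev(&n, count_dev);
 */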
1747 
1748 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1749 {
1750 	if (!ops)
1751 		return true;
1752 
1753 	return !ops->prepare &&
1754 	       !ops->suspend &&
1755 	       !ops->suspend_late &&
1756 	       !ops->suspend_noirq &&
1757 	       !ops->resume_noirq &&
1758 	       !ops->resume_early &&
1759 	       !ops->resume &&
1760 	       !ops->complete;
1761 }
1762 
1763 void device_pm_check_callbacks(struct device *dev)
1764 {
1765 	spin_lock_irq(&dev->power.lock);
1766 	dev->power.no_pm_callbacks =
1767 		(!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
1768 		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
1769 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
1770 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
1771 		(!dev->driver || pm_ops_is_empty(dev->driver->pm));
1772 	spin_unlock_irq(&dev->power.lock);
1773 }
1774