// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 */
#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
static void update_pm_runtime_accounting(struct device *dev)
{
	u64 now, last, delta;

	if (dev->power.disable_depth > 0)
		return;

	last = dev->power.accounting_timestamp;

	now = ktime_get_mono_fast_ns();
	dev->power.accounting_timestamp = now;

	/*
	 * Because ktime_get_mono_fast_ns() is not monotonic during
	 * timekeeping updates, ensure that 'now' is after the last saved
	 * timestamp.
	 */
	if (now < last)
		return;

	delta = now - last;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_time += delta;
	else
		dev->power.active_time += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	trace_rpm_status(dev, status);
	dev->power.runtime_status = status;
}

static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
{
	u64 time;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	update_pm_runtime_accounting(dev);
	time = suspended ? dev->power.suspended_time : dev->power.active_time;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return time;
}

u64 pm_runtime_active_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, false);
}

u64 pm_runtime_suspended_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		hrtimer_try_to_cancel(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	u64 expires;

	if (!dev->power.use_autosuspend)
		return 0;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		return 0;

	expires  = READ_ONCE(dev->power.last_busy);
	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
	if (expires > ktime_get_mono_fast_ns())
		return expires;	/* Expires in the future */

	return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
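
/*
 * Example (illustrative sketch; the foo_* names are hypothetical): a driver
 * rarely calls pm_runtime_autosuspend_expiration() directly.  Instead it
 * records activity and drops its reference, and the core computes the
 * expiration time and arms the suspend timer:
 *
 *	static void foo_io_done(struct device *dev)
 *	{
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *	}
 */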

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers for solving the deadlock problem during runtime
 * resume/suspend:
 *
 *     If memory allocation with GFP_KERNEL is called inside the runtime
 *     resume/suspend callback of any one of its ancestors (or the
 *     block device itself), the deadlock may be triggered inside the
 *     memory allocation since it might not complete until the block
 *     device becomes active and the involved page I/O finishes.  The
 *     situation was pointed out first by Alan Stern.  Network devices
 *     are involved in iSCSI kinds of situations.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable ancestors any more if the device
		 * has already been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of the parent device only if none of its
		 * children set the flag, because an ancestor's flag may
		 * have been set by any one of its descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
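
/*
 * Example (illustrative sketch; "foo" is a hypothetical block driver): the
 * flag is set after device_add() and cleared before device_del():
 *
 *	err = device_add(&foo->dev);
 *	if (err)
 *		goto fail;
 *	pm_runtime_set_memalloc_noio(&foo->dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(&foo->dev, false);
 *	device_del(&foo->dev);
 */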

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count))
		retval = -EAGAIN;
	else if (!dev->power.ignore_children &&
			atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_resume_latency(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held()) {
		int retval;

		if (!(link->flags & DL_FLAG_PM_RUNTIME))
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		refcount_inc(&link->rpm_active);
	}
	return 0;
}

/**
 * pm_runtime_release_supplier - Drop references to device link's supplier.
 * @link: Target device link.
 *
 * Drop all runtime PM references associated with @link to its supplier device.
 */
void pm_runtime_release_supplier(struct device_link *link)
{
	struct device *supplier = link->supplier;

	/*
	 * The additional power.usage_count check is a safety net in case
	 * the rpm_active refcount becomes saturated, in which case
	 * refcount_dec_not_one() would return true forever, but it is not
	 * strictly necessary.
	 */
	while (refcount_dec_not_one(&link->rpm_active) &&
	       atomic_read(&supplier->power.usage_count) > 0)
		pm_runtime_put_noidle(supplier);
}

static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held()) {
		pm_runtime_release_supplier(link);
		if (try_to_suspend)
			pm_request_idle(link->supplier);
	}
}

static void rpm_put_suppliers(struct device *dev)
{
	__rpm_put_suppliers(dev, true);
}

static void rpm_suspend_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		pm_request_idle(link->supplier);

	device_links_read_unlock(idx);
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval) {
				rpm_put_suppliers(dev);
				goto fail;
			}

			device_links_read_unlock(idx);
		}
	}

	if (cb)
		retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links
		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

			__rpm_put_suppliers(dev, false);

fail:
			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle_rcuidle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	/* If no callback assume success. */
	if (!callback || dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = callback(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * Deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself.  A network
		 * device might be thought of as part of an iSCSI block
		 * device, so the network device and its ancestors should
		 * be marked as memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry it out; otherwise send an idle
 * notification for the device's parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend_rcuidle(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		goto out;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		u64 expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires &&
					dev->power.timer_expires <= expires)) {
				/*
				 * We add a slack of 25% to gather wakeups
				 * without sacrificing the granularity.
				 */
				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
						    (NSEC_PER_MSEC >> 2);

				dev->power.timer_expires = expires;
				hrtimer_start_range_ns(&dev->power.suspend_timer,
						ns_to_ktime(expires),
						slack,
						HRTIMER_MODE_ABS);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

	dev_pm_enable_wake_irq_complete(dev);

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	if (dev->power.irq_safe)
		goto out;

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}
	/* Maybe the suppliers are now able to suspend. */
	if (dev->power.links_count > 0) {
		spin_unlock_irq(&dev->power.lock);

		rpm_suspend_suppliers(dev);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq_check(dev, true);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume_rcuidle(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
	} else if (dev->power.disable_depth > 0) {
		if (dev->power.runtime_status == RPM_ACTIVE &&
		    dev->power.last_status == RPM_ACTIVE)
			retval = 1;
		else
			retval = -EACCES;
	}
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING) {
				dev->power.deferred_resume = true;
				if (rpmflags & RPM_NOWAIT)
					retval = -EINPROGRESS;
			} else {
				retval = -EINPROGRESS;
			}
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and not been
		 * set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev, false);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @timer: hrtimer used by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
	struct device *dev = container_of(timer, struct device, power.suspend_timer);
	unsigned long flags;
	u64 expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/*
	 * If 'expires' is after the current time, we've been called
	 * too early.
	 */
	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return HRTIMER_NORESTART;
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	u64 expires;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
	dev->power.timer_expires = expires;
	dev->power.timer_autosuspends = 0;
	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
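
/*
 * Example (illustrative sketch; "foo" is hypothetical): ask for a suspend
 * roughly half a second from now instead of suspending synchronously:
 *
 *	retval = pm_schedule_suspend(foo->dev, 500);
 *	if (retval < 0)
 *		dev_dbg(foo->dev, "suspend not scheduled: %d\n", retval);
 */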

static int rpm_drop_usage_count(struct device *dev)
{
	int ret;

	ret = atomic_sub_return(1, &dev->power.usage_count);
	if (ret >= 0)
		return ret;

	/*
	 * Because rpm_resume() does not check the usage counter, it will resume
	 * the device even if the usage counter is 0 or negative, so it is
	 * sufficient to increment the usage counter here to reverse the change
	 * made above.
	 */
	atomic_inc(&dev->power.usage_count);
	dev_warn(dev, "Runtime PM usage count underflow!\n");
	return -EINVAL;
}

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error).  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		retval = rpm_drop_usage_count(dev);
		if (retval < 0) {
			return retval;
		} else if (retval > 0) {
			trace_rpm_usage_rcuidle(dev, rpmflags);
			return 0;
		}
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
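
/*
 * Note: the static inline wrappers in include/linux/pm_runtime.h funnel into
 * this entry point; for instance pm_runtime_idle(dev) passes 0 and
 * pm_runtime_put(dev) passes RPM_GET_PUT | RPM_ASYNC, so a plain
 * pm_runtime_put() both drops the usage count and, when it reaches zero,
 * queues an asynchronous idle notification.
 */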

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error).  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		retval = rpm_drop_usage_count(dev);
		if (retval < 0) {
			return retval;
		} else if (retval > 0) {
			trace_rpm_usage_rcuidle(dev, rpmflags);
			return 0;
		}
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
			dev->power.runtime_status != RPM_ACTIVE);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
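
/*
 * Example (illustrative sketch; "foo" is hypothetical): the common resume
 * pattern goes through the pm_runtime_get_sync() wrapper, and the reference
 * must be dropped even when the resume fails:
 *
 *	retval = pm_runtime_get_sync(foo->dev);
 *	if (retval < 0) {
 *		pm_runtime_put_noidle(foo->dev);
 *		return retval;
 *	}
 *	foo_do_io(foo);
 *	pm_runtime_put(foo->dev);
 *
 * pm_runtime_resume_and_get() wraps exactly this get/put_noidle sequence.
 */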

/**
 * pm_runtime_get_if_active - Conditionally bump up device usage counter.
 * @dev: Device to handle.
 * @ign_usage_count: Whether or not to look at the current usage counter value.
 *
 * Return -EINVAL if runtime PM is disabled for @dev.
 *
 * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
 * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
 * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
 * without changing the usage counter.
 *
 * If @ign_usage_count is %true, this function can be used to prevent suspending
 * the device when its runtime PM status is %RPM_ACTIVE.
 *
 * If @ign_usage_count is %false, this function can be used to prevent
 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
 * runtime PM usage counter is not zero.
 *
 * The caller is responsible for decrementing the runtime PM usage counter of
 * @dev after this function has returned a positive value for it.
 */
int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (dev->power.disable_depth > 0) {
		retval = -EINVAL;
	} else if (dev->power.runtime_status != RPM_ACTIVE) {
		retval = 0;
	} else if (ign_usage_count) {
		retval = 1;
		atomic_inc(&dev->power.usage_count);
	} else {
		retval = atomic_inc_not_zero(&dev->power.usage_count);
	}
	trace_rpm_usage_rcuidle(dev, 0);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
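
/*
 * Example (illustrative sketch; foo_* is hypothetical): the
 * pm_runtime_get_if_in_use() wrapper passes @ign_usage_count == false and
 * suits hot paths that must never wake a suspended device:
 *
 *	if (pm_runtime_get_if_in_use(foo->dev) <= 0)
 *		return;			// not active or not in use, skip
 *	foo_poke_hw(foo);		// safe: device is guaranteed active
 *	pm_runtime_put(foo->dev);
 */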

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 *
 * If @dev has any suppliers (as reflected by device links to them), and @status
 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
 * of the @status value) and the suppliers will be deactivated on exit.  The
 * error returned by the failing supplier activation will be returned in that
 * case.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	bool notify_parent = false;
	unsigned long flags;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	/*
	 * Prevent PM-runtime from being enabled for the device or return an
	 * error if it is enabled already and working.
	 */
	if (dev->power.runtime_error || dev->power.disable_depth)
		dev->power.disable_depth++;
	else
		error = -EAGAIN;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (error)
		return error;

	/*
	 * If the new status is RPM_ACTIVE, the suppliers can be activated
	 * upfront regardless of the current status, because next time
	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
	 * involved will be dropped down to one anyway.
	 */
	if (status == RPM_ACTIVE) {
		int idx = device_links_read_lock();

		error = rpm_get_suppliers(dev);
		if (error)
			status = RPM_SUSPENDED;

		device_links_read_unlock(idx);
	}

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_status == status || !parent)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		atomic_add_unless(&parent->power.child_count, -1, 0);
		notify_parent = !parent->power.ignore_children;
	} else {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error) {
			status = RPM_SUSPENDED;
			goto out;
		}
	}

 out_set:
	__update_runtime_status(dev, status);
	if (!error)
		dev->power.runtime_error = 0;

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	if (status == RPM_SUSPENDED) {
		int idx = device_links_read_lock();

		rpm_put_suppliers(dev);

		device_links_read_unlock(idx);
	}

	pm_runtime_enable(dev);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
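
/*
 * Example (illustrative sketch): drivers normally reach this function through
 * the pm_runtime_set_active() and pm_runtime_set_suspended() wrappers,
 * typically in probe before runtime PM is enabled, so that the core's view
 * matches the real hardware state:
 *
 *	pm_runtime_set_active(&pdev->dev);
 *	pm_runtime_enable(&pdev->dev);
 */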

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	/* Update time accounting before disabling PM-runtime. */
	update_pm_runtime_accounting(dev);

	if (!dev->power.disable_depth++) {
		__pm_runtime_barrier(dev);
		dev->power.last_status = dev->power.runtime_status;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.disable_depth) {
		dev_warn(dev, "Unbalanced %s!\n", __func__);
		goto out;
	}

	if (--dev->power.disable_depth > 0)
		goto out;

	dev->power.last_status = RPM_INVALID;
	dev->power.accounting_timestamp = ktime_get_mono_fast_ns();

	if (dev->power.runtime_status == RPM_SUSPENDED &&
	    !dev->power.ignore_children &&
	    atomic_read(&dev->power.child_count) > 0)
		dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");

out:
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

static void pm_runtime_disable_action(void *data)
{
	pm_runtime_dont_use_autosuspend(data);
	pm_runtime_disable(data);
}

/**
 * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
 *
 * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
 * you at driver exit time if needed.
 *
 * @dev: Device to handle.
 */
int devm_pm_runtime_enable(struct device *dev)
{
	pm_runtime_enable(dev);

	return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
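
/*
 * Example (illustrative sketch; foo_probe() and foo_hw_init() are
 * hypothetical): with the devres variant the matching disable runs
 * automatically on driver detach, so error paths need no explicit cleanup:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret = devm_pm_runtime_enable(&pdev->dev);
 *
 *		if (ret)
 *			return ret;
 *		return foo_hw_init(pdev);
 *	}
 */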

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	int ret;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	ret = rpm_drop_usage_count(dev);
	if (ret == 0)
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
	else if (ret > 0)
		trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle.
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
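
/*
 * Example (illustrative sketch): a driver whose runtime PM callbacks are safe
 * to run with interrupts off calls this once, e.g. in probe, before issuing
 * pm_runtime_get()/pm_runtime_put() from atomic context:
 *
 *	pm_runtime_irq_safe(&pdev->dev);
 */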
1625 
1626 /**
1627  * update_autosuspend - Handle a change to a device's autosuspend settings.
1628  * @dev: Device to handle.
1629  * @old_delay: The former autosuspend_delay value.
1630  * @old_use: The former use_autosuspend value.
1631  *
1632  * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1633  * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1634  *
1635  * This function must be called under dev->power.lock with interrupts disabled.
1636  */
update_autosuspend(struct device * dev,int old_delay,int old_use)1637 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1638 {
1639 	int delay = dev->power.autosuspend_delay;
1640 
1641 	/* Should runtime suspend be prevented now? */
1642 	if (dev->power.use_autosuspend && delay < 0) {
1643 
1644 		/* If it used to be allowed then prevent it. */
1645 		if (!old_use || old_delay >= 0) {
1646 			atomic_inc(&dev->power.usage_count);
1647 			rpm_resume(dev, 0);
1648 		} else {
1649 			trace_rpm_usage_rcuidle(dev, 0);
1650 		}
1651 	}
1652 
1653 	/* Runtime suspend should be allowed now. */
1654 	else {
1655 
1656 		/* If it used to be prevented then allow it. */
1657 		if (old_use && old_delay < 0)
1658 			atomic_dec(&dev->power.usage_count);
1659 
1660 		/* Maybe we can autosuspend now. */
1661 		rpm_idle(dev, RPM_AUTO);
1662 	}
1663 }
1664 
1665 /**
1666  * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1667  * @dev: Device to handle.
1668  * @delay: Value of the new delay in milliseconds.
1669  *
1670  * Set the device's power.autosuspend_delay value.  If it changes to negative
1671  * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1672  * changes the other way, allow runtime suspends.
1673  */
pm_runtime_set_autosuspend_delay(struct device * dev,int delay)1674 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1675 {
1676 	int old_delay, old_use;
1677 
1678 	spin_lock_irq(&dev->power.lock);
1679 	old_delay = dev->power.autosuspend_delay;
1680 	old_use = dev->power.use_autosuspend;
1681 	dev->power.autosuspend_delay = delay;
1682 	update_autosuspend(dev, old_delay, old_use);
1683 	spin_unlock_irq(&dev->power.lock);
1684 }
1685 EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
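
/*
 * Drivers normally reach this function through the pm_runtime_use_autosuspend()
 * and pm_runtime_dont_use_autosuspend() wrappers.  Once autosuspend is in use,
 * the idiomatic way to drop a runtime PM reference is (a minimal sketch):
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 *
 * so the device is suspended only after autosuspend_delay has elapsed since
 * the last pm_runtime_mark_last_busy() call.
 */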

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.last_status = RPM_INVALID;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.needs_force_resume = 0;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	dev->power.suspend_timer.function = pm_suspend_timer_fn;

	init_waitqueue_head(&dev->power.wait_queue);
}
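
/*
 * Since disable_depth starts out as 1 above, runtime PM is effectively
 * disabled for every new device until pm_runtime_enable() is called.  A
 * driver that powers its device up during probe typically does (a minimal
 * sketch):
 *
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 *
 * so that the recorded runtime PM status matches the hardware state before
 * any runtime PM transitions are allowed.
 */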

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->flags & DL_FLAG_PM_RUNTIME) {
			link->supplier_preactivated = true;
			pm_runtime_get_sync(link->supplier);
		}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->supplier_preactivated) {
			link->supplier_preactivated = false;
			pm_runtime_put(link->supplier);
		}

	device_links_read_unlock(idx);
}
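
/*
 * These helpers act on device links created with DL_FLAG_PM_RUNTIME set,
 * e.g. (a minimal sketch; "consumer" and "supplier" are placeholders):
 *
 *	link = device_link_add(consumer, supplier,
 *			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
 *
 * With such a link in place, runtime-resuming the consumer automatically
 * takes a runtime PM reference on the supplier as well.
 */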

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

static void pm_runtime_drop_link_count(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_runtime_drop_link - Prepare for device link removal.
 * @link: Device link going away.
 *
 * Drop the link count of the consumer end of @link and decrement the supplier
 * device's runtime PM usage counter as many times as needed to drop all of the
 * runtime PM references to it held on behalf of the consumer.
 */
void pm_runtime_drop_link(struct device_link *link)
{
	if (!(link->flags & DL_FLAG_PM_RUNTIME))
		return;

	pm_runtime_drop_link_count(link->consumer);
	pm_runtime_release_supplier(link);
	pm_request_idle(link->supplier);
}

static bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we can safely check the device's runtime PM status
 * and, if it is active, invoke its ->runtime_suspend callback to suspend it
 * and change its runtime PM status field to RPM_SUSPENDED.  Also, if the
 * device's usage and children counters don't indicate that the device was in
 * use before the system-wide transition under way, decrement its parent's
 * children counter (if there is a parent).  Keep runtime PM disabled to
 * preserve the state unless we encounter errors.
 *
 * Typically, this function is invoked from a system suspend callback to make
 * sure the device is put into a low-power state, and it should only be used
 * during system-wide PM transitions to sleep states.  It assumes that the
 * analogous pm_runtime_force_resume() will be used to resume the device (an
 * example of the intended usage follows pm_runtime_force_resume() below).
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	dev_pm_enable_wake_irq_complete(dev);

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
	 * function is called again for it in the meantime.
	 */
	if (pm_runtime_need_not_resume(dev)) {
		pm_runtime_set_suspended(dev);
	} else {
		__update_runtime_status(dev, RPM_SUSPENDED);
		dev->power.needs_force_resume = 1;
	}

	return 0;

err:
	dev_pm_disable_wake_irq_check(dev, true);
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function, the caller is expected to have brought the
 * device into a low-power state by a call to pm_runtime_force_suspend().  Here
 * we reverse those actions and bring the device back to full power, if it is
 * expected to be used on system resume.  Otherwise, the resume is deferred and
 * managed via runtime PM.
 *
 * Typically, this function is invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
		goto out;

	/*
	 * The value of the parent's children counter is correct already, so
	 * just update the status of the device.
	 */
	__update_runtime_status(dev, RPM_ACTIVE);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev, false);
	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		dev_pm_enable_wake_irq_check(dev, false);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
out:
	dev->power.needs_force_resume = 0;
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
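
/*
 * Example (a minimal sketch; "foo_pm_ops" and the foo_* callbacks are
 * placeholder names): a driver with no special system sleep requirements can
 * reuse its runtime PM callbacks for system-wide transitions by wiring the
 * two helpers above into its dev_pm_ops:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *				   foo_runtime_resume, NULL)
 *	};
 *
 * Recent kernels also provide DEFINE_RUNTIME_DEV_PM_OPS() as a shorthand for
 * this arrangement.
 */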
1951