Lines matching "suspend-in-wait" in drivers/base/power/runtime.c

// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 */
In __rpm_get_callback():

    if (dev->pm_domain)
        ops = &dev->pm_domain->ops;
    else if (dev->type && dev->type->pm)
        ops = dev->type->pm;
    else if (dev->class && dev->class->pm)
        ops = dev->class->pm;
    else if (dev->bus && dev->bus->pm)
        ops = dev->bus->pm;
    ...
    if (!cb && dev->driver && dev->driver->pm)
        cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
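For context, the callbacks this lookup walks through normally originate in a driver's struct dev_pm_ops. A minimal sketch of the driver side (the foo_* names are hypothetical, not part of runtime.c):

    static int foo_runtime_suspend(struct device *dev)
    {
        /* Put the hardware into its low-power state. */
        return 0;
    }

    static int foo_runtime_resume(struct device *dev)
    {
        /* Bring the hardware back to full functionality. */
        return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };

Note the precedence the chain above implies: a PM domain attached to the device overrides everything else, and the driver's own ops are consulted only when neither the domain, type, class nor bus supplies a callback.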
/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
In update_pm_runtime_accounting():

    if (dev->power.disable_depth > 0)
        return;

    last = dev->power.accounting_timestamp;
    ...
    dev->power.accounting_timestamp = now;
    ...
    delta = now - last;

    if (dev->power.runtime_status == RPM_SUSPENDED)
        dev->power.suspended_time += delta;
    else
        dev->power.active_time += delta;

In __update_runtime_status():

    dev->power.runtime_status = status;
In rpm_get_accounted_time():

    spin_lock_irqsave(&dev->power.lock, flags);
    ...
    time = suspended ? dev->power.suspended_time : dev->power.active_time;
    ...
    spin_unlock_irqrestore(&dev->power.lock, flags);
/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 */
In pm_runtime_deactivate_timer():

    if (dev->power.timer_expires > 0) {
        hrtimer_try_to_cancel(&dev->power.suspend_timer);
        dev->power.timer_expires = 0;
    }
/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 */
In pm_runtime_cancel_pending():

    /*
     * In case there's a request pending, make sure its work function will
     * return without doing anything.
     */
    dev->power.request = RPM_REQ_NONE;
/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
In pm_runtime_autosuspend_expiration():

    if (!dev->power.use_autosuspend)
        return 0;

    autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
    if (autosuspend_delay < 0)
        return 0;

    expires  = READ_ONCE(dev->power.last_busy);
    expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
    if (expires > ktime_get_mono_fast_ns())
        return expires;    /* Expires in the future */

    return 0;
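A driver that wants this expiration logic to keep the device up between bursts of activity refreshes power.last_busy and uses the autosuspend flavor of put; a typical sketch (assuming autosuspend was enabled at probe time):

    /* E.g. at the end of an I/O completion path: */
    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);

pm_runtime_mark_last_busy() updates power.last_busy, which is exactly what the computation above reads through READ_ONCE().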
In dev_memalloc_noio():

    return dev->power.memalloc_noio;
/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block or network device drivers,
 * to solve a deadlock during runtime resume/suspend:
 *
 *     If memory allocation with GFP_KERNEL happens inside the runtime
 *     resume/suspend callback of any one of its ancestors (or the
 *     device itself), the allocation may deadlock, because it cannot
 *     complete until the device becomes active again and the pending
 *     page I/O finishes.  Network devices
 *     are involved in iSCSI kinds of situations.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug races, because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 */
In pm_runtime_set_memalloc_noio():

    ...
        /* hold power lock since bitfield is not SMP-safe. */
        spin_lock_irq(&dev->power.lock);
        enabled = dev->power.memalloc_noio;
        dev->power.memalloc_noio = enable;
        spin_unlock_irq(&dev->power.lock);
        ...
        dev = dev->parent;
    ...
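A block or network driver exposed to the reclaim deadlock described above would flip the flag around device registration; a hedged sketch:

    /* After device_add(): make runtime PM callbacks of the whole
     * ancestor path run with GFP_NOIO allocation semantics. */
    pm_runtime_set_memalloc_noio(dev, true);

    /* ... device lifetime ... */

    /* Before device_del(): clear it again. */
    pm_runtime_set_memalloc_noio(dev, false);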
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
In rpm_check_suspend_allowed():

    if (dev->power.runtime_error)
        retval = -EINVAL;
    else if (dev->power.disable_depth > 0)
        retval = -EACCES;
    else if (atomic_read(&dev->power.usage_count) > 0)
        retval = -EAGAIN;
    else if (!dev->power.ignore_children &&
             atomic_read(&dev->power.child_count))
        retval = -EBUSY;

    /* Pending resume requests take precedence over suspends. */
    else if ((dev->power.deferred_resume &&
              dev->power.runtime_status == RPM_SUSPENDING) ||
             (dev->power.request_pending &&
              dev->power.request == RPM_REQ_RESUME))
        retval = -EAGAIN;
    else if (__dev_pm_qos_resume_latency(dev) == 0)
        retval = -EPERM;
    else if (dev->power.runtime_status == RPM_SUSPENDED)
        retval = 1;
In rpm_get_suppliers():

    list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                            device_links_read_lock_held()) {
        if (!(link->flags & DL_FLAG_PM_RUNTIME))
            continue;

        retval = pm_runtime_get_sync(link->supplier);
        /* Ignore suppliers with disabled runtime PM. */
        if (retval < 0 && retval != -EACCES) {
            pm_runtime_put_noidle(link->supplier);
            return retval;
        }
        refcount_inc(&link->rpm_active);
    }
/**
 * pm_runtime_release_supplier - Drop references to device link's supplier.
 * @link: Target device link.
 *
 * Drop all runtime PM references associated with @link to its supplier device.
 */
In pm_runtime_release_supplier():

    struct device *supplier = link->supplier;

    /*
     * The additional power.usage_count check is a safety net in case
     * the rpm_active refcount becomes saturated, in which case
     * refcount_dec_not_one() would return true forever, but it is not
     * strictly necessary.
     */
    while (refcount_dec_not_one(&link->rpm_active) &&
           atomic_read(&supplier->power.usage_count) > 0)
        pm_runtime_put_noidle(supplier);
In __rpm_put_suppliers():

    list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                            device_links_read_lock_held()) {
        pm_runtime_release_supplier(link);
        if (try_to_suspend)
            pm_request_idle(link->supplier);
    }
In rpm_suspend_suppliers():

    list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                            device_links_read_lock_held())
        pm_request_idle(link->supplier);
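The supplier handling above applies only to device links created with runtime PM management enabled; a consumer driver typically requests that when it creates the link (consumer_dev and supplier_dev are placeholders):

    struct device_link *link;

    link = device_link_add(consumer_dev, supplier_dev,
                           DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
    if (!link)
        dev_err(consumer_dev, "failed to create supplier link\n");

With DL_FLAG_PM_RUNTIME set, every runtime resume of the consumer takes a runtime PM reference on the supplier through rpm_get_suppliers() above.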
/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
In __rpm_callback():
    __releases(&dev->power.lock) __acquires(&dev->power.lock)

    bool use_links = dev->power.links_count > 0;

    if (dev->power.irq_safe) {
        spin_unlock(&dev->power.lock);
    } else {
        spin_unlock_irq(&dev->power.lock);

        /* Resume suppliers if necessary. */
        if (use_links && dev->power.runtime_status == RPM_RESUMING) {
            ...
        }
    }

    retval = cb(dev);

    if (dev->power.irq_safe) {
        spin_lock(&dev->power.lock);
    } else {
        /*
         * If the device is suspending and the callback has returned
         * success, drop the usage counters of the suppliers that have
         * been reference counted on its resume.
         *
         * Do that if resume fails too.
         */
        if (use_links &&
            ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
             (dev->power.runtime_status == RPM_RESUMING && retval))) {
            ...
        }

        spin_lock_irq(&dev->power.lock);
    }
/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
 * doesn't return an error code, suspend the device.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
In rpm_idle():

    /* Idle notifications are allowed only in the RPM_ACTIVE state. */
    else if (dev->power.runtime_status != RPM_ACTIVE)
        retval = -EAGAIN;
    ...
    else if (dev->power.request_pending &&
             dev->power.request > RPM_REQ_IDLE)
        retval = -EAGAIN;

    /* Act as though RPM_NOWAIT is always set. */
    else if (dev->power.idle_notification)
        retval = -EINPROGRESS;
    ...
    /* Pending requests need to be canceled. */
    dev->power.request = RPM_REQ_NONE;

    callback = RPM_GET_CALLBACK(dev, runtime_idle);

    /* If no callback assume success. */
    if (!callback || dev->power.no_callbacks)
        goto out;

    /* Carry out an asynchronous or a synchronous idle notification. */
    if (rpmflags & RPM_ASYNC) {
        dev->power.request = RPM_REQ_IDLE;
        if (!dev->power.request_pending) {
            dev->power.request_pending = true;
            queue_work(pm_wq, &dev->power.work);
        }
        goto out;
    }

    dev->power.idle_notification = true;

    if (dev->power.irq_safe)
        spin_unlock(&dev->power.lock);
    else
        spin_unlock_irq(&dev->power.lock);

    retval = callback(dev);

    if (dev->power.irq_safe)
        spin_lock(&dev->power.lock);
    else
        spin_lock_irq(&dev->power.lock);

    dev->power.idle_notification = false;
    wake_up_all(&dev->power.wait_queue);
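A ->runtime_idle() callback cooperating with this logic either returns 0 to let the core continue into rpm_suspend(), or returns an error to veto or defer the suspend; a hypothetical sketch:

    static int foo_runtime_idle(struct device *dev)
    {
        if (foo_hw_busy(dev))   /* hypothetical busyness check */
            return -EBUSY;      /* veto the suspend for now */

        return 0;               /* let the PM core suspend the device */
    }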
/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
In rpm_callback():

    if (!cb)
        return -ENOSYS;

    if (dev->power.memalloc_noio) {
        unsigned int noio_flag;

        noio_flag = memalloc_noio_save();
        retval = __rpm_callback(cb, dev);
        memalloc_noio_restore(noio_flag);
    } else {
        retval = __rpm_callback(cb, dev);
    }

    dev->power.runtime_error = retval;
    return retval != -EACCES ? retval : -EIO;
/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend. If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly. When
 * ->runtime_suspend succeeded, if a deferred resume was requested while
 * the callback was running then carry it out, otherwise send an idle
 * notification for its parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
In rpm_suspend():
    __releases(&dev->power.lock) __acquires(&dev->power.lock)

 repeat:
    /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
    if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
        retval = -EAGAIN;
    if (retval)
        goto out;

    /* If the autosuspend_delay time hasn't expired yet, reschedule. */
    if ((rpmflags & RPM_AUTO) &&
        dev->power.runtime_status != RPM_SUSPENDING) {
        u64 expires = pm_runtime_autosuspend_expiration(dev);

        if (expires != 0) {
            /* Pending requests need to be canceled. */
            dev->power.request = RPM_REQ_NONE;
            ...
            if (!(dev->power.timer_expires &&
                  dev->power.timer_expires <= expires)) {
                /*
                 * We add a slack of 25% to gather wakeups
                 * without sacrificing the granularity.
                 */
                u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
                                (NSEC_PER_MSEC >> 2);

                dev->power.timer_expires = expires;
                hrtimer_start_range_ns(&dev->power.suspend_timer,
                                       ns_to_ktime(expires), slack,
                                       HRTIMER_MODE_ABS);
            }
            dev->power.timer_autosuspends = 1;
            goto out;
        }
    }
    ...
    if (dev->power.runtime_status == RPM_SUSPENDING) {
        DEFINE_WAIT(wait);

        if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
            retval = -EINPROGRESS;
            goto out;
        }

        if (dev->power.irq_safe) {
            spin_unlock(&dev->power.lock);
            cpu_relax();
            spin_lock(&dev->power.lock);
            goto repeat;
        }

        /* Wait for the other suspend running in parallel with us. */
        for (;;) {
            prepare_to_wait(&dev->power.wait_queue, &wait,
                            TASK_UNINTERRUPTIBLE);
            if (dev->power.runtime_status != RPM_SUSPENDING)
                break;

            spin_unlock_irq(&dev->power.lock);
            schedule();
            spin_lock_irq(&dev->power.lock);
        }
        finish_wait(&dev->power.wait_queue, &wait);
        goto repeat;
    }

    if (dev->power.no_callbacks)
        goto no_callback;   /* Assume success. */

    /* Carry out an asynchronous or a synchronous suspend. */
    if (rpmflags & RPM_ASYNC) {
        dev->power.request = (rpmflags & RPM_AUTO) ?
            RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
        if (!dev->power.request_pending) {
            dev->power.request_pending = true;
            queue_work(pm_wq, &dev->power.work);
        }
        goto out;
    }
    ...
 no_callback:
    __update_runtime_status(dev, RPM_SUSPENDED);
    pm_runtime_deactivate_timer(dev);

    if (dev->parent) {
        parent = dev->parent;
        atomic_add_unless(&parent->power.child_count, -1, 0);
    }
    wake_up_all(&dev->power.wait_queue);

    if (dev->power.deferred_resume) {
        dev->power.deferred_resume = false;
        rpm_resume(dev, 0);
        retval = -EAGAIN;
        goto out;
    }

    if (dev->power.irq_safe)
        goto out;

    /* Maybe the parent is now able to suspend. */
    if (parent && !parent->power.ignore_children) {
        spin_unlock(&dev->power.lock);
        spin_lock(&parent->power.lock);
        rpm_idle(parent, RPM_ASYNC);
        spin_unlock(&parent->power.lock);
        spin_lock(&dev->power.lock);
    }
    /* Maybe the suppliers are now able to suspend. */
    if (dev->power.links_count > 0) {
        spin_unlock_irq(&dev->power.lock);
        rpm_suspend_suppliers(dev);
        spin_lock_irq(&dev->power.lock);
    }
    ...
 fail:
    __update_runtime_status(dev, RPM_ACTIVE);
    dev->power.deferred_resume = false;
    wake_up_all(&dev->power.wait_queue);

    if (retval == -EAGAIN || retval == -EBUSY)
        dev->power.runtime_error = 0;
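The rpmflags combinations rpm_suspend() interprets are normally supplied by thin wrappers rather than by callers directly; paraphrasing the static inlines in include/linux/pm_runtime.h of the same vintage:

    pm_runtime_suspend(dev);          /* __pm_runtime_suspend(dev, 0) */
    pm_runtime_autosuspend(dev);      /* __pm_runtime_suspend(dev, RPM_AUTO) */
    pm_request_autosuspend(dev);      /* __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO) */
    pm_runtime_put_sync_suspend(dev); /* __pm_runtime_suspend(dev, RPM_GET_PUT) */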
/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
In rpm_resume():
    __releases(&dev->power.lock) __acquires(&dev->power.lock)

 repeat:
    if (dev->power.runtime_error)
        retval = -EINVAL;
    else if (dev->power.disable_depth == 1 && dev->power.is_suspended &&
             dev->power.runtime_status == RPM_ACTIVE)
        retval = 1;
    else if (dev->power.disable_depth > 0)
        retval = -EACCES;
    if (retval)
        goto out;

    /*
     * Other scheduled or pending requests need to be canceled.  Small
     * optimization: If an autosuspend timer is running, leave it running
     * rather than cancelling it now only to restart it again in the near
     * future.
     */
    dev->power.request = RPM_REQ_NONE;
    if (!dev->power.timer_autosuspends)
        pm_runtime_deactivate_timer(dev);

    if (dev->power.runtime_status == RPM_ACTIVE) {
        retval = 1;
        goto out;
    }

    if (dev->power.runtime_status == RPM_RESUMING ||
        dev->power.runtime_status == RPM_SUSPENDING) {
        DEFINE_WAIT(wait);

        if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
            if (dev->power.runtime_status == RPM_SUSPENDING)
                dev->power.deferred_resume = true;
            else
                retval = -EINPROGRESS;
            goto out;
        }

        if (dev->power.irq_safe) {
            spin_unlock(&dev->power.lock);
            cpu_relax();
            spin_lock(&dev->power.lock);
            goto repeat;
        }

        /* Wait for the operation carried out in parallel with us. */
        for (;;) {
            prepare_to_wait(&dev->power.wait_queue, &wait,
                            TASK_UNINTERRUPTIBLE);
            if (dev->power.runtime_status != RPM_RESUMING &&
                dev->power.runtime_status != RPM_SUSPENDING)
                break;

            spin_unlock_irq(&dev->power.lock);
            schedule();
            spin_lock_irq(&dev->power.lock);
        }
        finish_wait(&dev->power.wait_queue, &wait);
        goto repeat;
    }

    if (dev->power.no_callbacks && !parent && dev->parent) {
        spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
        if (dev->parent->power.disable_depth > 0 ||
            dev->parent->power.ignore_children ||
            dev->parent->power.runtime_status == RPM_ACTIVE) {
            atomic_inc(&dev->parent->power.child_count);
            spin_unlock(&dev->parent->power.lock);
            retval = 1;
            goto no_callback;   /* Assume success. */
        }
        spin_unlock(&dev->parent->power.lock);
    }

    /* Carry out an asynchronous or a synchronous resume. */
    if (rpmflags & RPM_ASYNC) {
        dev->power.request = RPM_REQ_RESUME;
        if (!dev->power.request_pending) {
            dev->power.request_pending = true;
            queue_work(pm_wq, &dev->power.work);
        }
        goto out;
    }

    if (!parent && dev->parent) {
        /*
         * Increment the parent's usage counter and resume it if
         * necessary.  Not needed if dev is irq-safe; then the
         * parent is permanently resumed.
         */
        parent = dev->parent;
        if (dev->power.irq_safe)
            goto skip_parent;

        spin_unlock(&dev->power.lock);
        ...
        spin_lock(&parent->power.lock);
        /*
         * Resume the parent if it has runtime PM enabled and not been
         * set to ignore its children.
         */
        if (!parent->power.disable_depth &&
            !parent->power.ignore_children) {
            rpm_resume(parent, 0);
            if (parent->power.runtime_status != RPM_ACTIVE)
                retval = -EBUSY;
        }
        spin_unlock(&parent->power.lock);

        spin_lock(&dev->power.lock);
        if (retval)
            goto out;
        goto repeat;
    }
 skip_parent:

    if (dev->power.no_callbacks)
        goto no_callback;   /* Assume success. */
    ...
 no_callback:
    __update_runtime_status(dev, RPM_ACTIVE);
    if (parent)
        atomic_inc(&parent->power.child_count);

    wake_up_all(&dev->power.wait_queue);
    ...
 out:
    if (parent && !dev->power.irq_safe) {
        spin_unlock_irq(&dev->power.lock);

        pm_runtime_put(parent);

        spin_lock_irq(&dev->power.lock);
    }
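From a driver's point of view, the usual way into this path is pm_runtime_get_sync() or, in kernels that provide it, pm_runtime_resume_and_get(), which drops the usage count again when the resume fails; a sketch:

    int ret;

    ret = pm_runtime_resume_and_get(dev);
    if (ret < 0)
        return ret;     /* usage count was already dropped for us */

    /* ... access the hardware ... */

    pm_runtime_put(dev);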
/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
In pm_runtime_work():

    spin_lock_irq(&dev->power.lock);

    if (!dev->power.request_pending)
        goto out;

    req = dev->power.request;
    dev->power.request = RPM_REQ_NONE;
    dev->power.request_pending = false;

    ... /* switch (req) dispatches to rpm_idle(), rpm_suspend() or rpm_resume() */

 out:
    spin_unlock_irq(&dev->power.lock);
/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @timer: hrtimer used by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
In pm_suspend_timer_fn():

    spin_lock_irqsave(&dev->power.lock, flags);

    expires = dev->power.timer_expires;
    /*
     * If 'expires' is after the current time, we've been called
     * too early.
     */
    if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
        dev->power.timer_expires = 0;
        rpm_suspend(dev, dev->power.timer_autosuspends ?
            (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
    }

    spin_unlock_irqrestore(&dev->power.lock, flags);
/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
In pm_schedule_suspend():

    spin_lock_irqsave(&dev->power.lock, flags);

    if (!delay) {
        retval = rpm_suspend(dev, RPM_ASYNC);
        goto out;
    }

    retval = rpm_check_suspend_allowed(dev);
    if (retval)
        goto out;

    /* Other scheduled or pending requests need to be canceled. */
    pm_runtime_cancel_pending(dev);

    expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
    dev->power.timer_expires = expires;
    dev->power.timer_autosuspends = 0;
    hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

 out:
    spin_unlock_irqrestore(&dev->power.lock, flags);
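This is the one-shot alternative to the autosuspend machinery: the caller picks the delay per call instead of configuring a per-device autosuspend window. A sketch:

    /* Ask for a suspend attempt 250 ms from now; returns immediately. */
    int ret = pm_schedule_suspend(dev, 250);

A later resume cancels the timer, and a new pm_schedule_suspend() call re-arms it.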
/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called for @dev.
 */
In __pm_runtime_idle():

    if (rpmflags & RPM_GET_PUT) {
        if (!atomic_dec_and_test(&dev->power.usage_count)) {
            ...
            return 0;
        }
    }

    might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

    spin_lock_irqsave(&dev->power.lock, flags);
    retval = rpm_idle(dev, rpmflags);
    spin_unlock_irqrestore(&dev->power.lock, flags);

    return retval;
/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called for @dev.
 */
In __pm_runtime_suspend():

    if (rpmflags & RPM_GET_PUT) {
        if (!atomic_dec_and_test(&dev->power.usage_count)) {
            ...
            return 0;
        }
    }

    might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

    spin_lock_irqsave(&dev->power.lock, flags);
    retval = rpm_suspend(dev, rpmflags);
    spin_unlock_irqrestore(&dev->power.lock, flags);

    return retval;
/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called for @dev.
 */
In __pm_runtime_resume():

    might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
                   dev->power.runtime_status != RPM_ACTIVE);

    if (rpmflags & RPM_GET_PUT)
        atomic_inc(&dev->power.usage_count);

    spin_lock_irqsave(&dev->power.lock, flags);
    retval = rpm_resume(dev, rpmflags);
    spin_unlock_irqrestore(&dev->power.lock, flags);

    return retval;
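The familiar get/put helpers are thin wrappers around these three entry points; paraphrasing include/linux/pm_runtime.h again:

    pm_runtime_get(dev);             /* __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC) */
    pm_runtime_get_sync(dev);        /* __pm_runtime_resume(dev, RPM_GET_PUT) */
    pm_runtime_put(dev);             /* __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC) */
    pm_runtime_put_autosuspend(dev); /* __pm_runtime_suspend(dev,
                                            RPM_GET_PUT | RPM_ASYNC | RPM_AUTO) */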
/**
 * pm_runtime_get_if_active - Conditionally bump up device usage counter.
 * @dev: Device to handle.
 * @ign_usage_count: Whether or not to look at the current usage counter value.
 *
 * Return -EINVAL if runtime PM is disabled for @dev.
 *
 * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
 * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
 * zero, increment the usage counter of @dev and return 1.  Otherwise, return 0
 * without changing the usage counter.
 */
In pm_runtime_get_if_active():

    spin_lock_irqsave(&dev->power.lock, flags);
    if (dev->power.disable_depth > 0) {
        retval = -EINVAL;
    } else if (dev->power.runtime_status != RPM_ACTIVE) {
        retval = 0;
    } else if (ign_usage_count) {
        retval = 1;
        atomic_inc(&dev->power.usage_count);
    } else {
        retval = atomic_inc_not_zero(&dev->power.usage_count);
    }
    spin_unlock_irqrestore(&dev->power.lock, flags);

    return retval;
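This helper suits paths that must not power the device up, such as interrupt handlers; a sketch for this two-argument vintage of the API (foo_handle_event is hypothetical):

    if (pm_runtime_get_if_active(dev, false) > 0) {
        /* Device was active and in use; registers may be touched. */
        foo_handle_event(dev);
        pm_runtime_put(dev);
    }
    /* Otherwise the device is suspended (or idle) and we stay hands-off. */

Passing false makes the call equivalent to pm_runtime_get_if_in_use().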
/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If @dev has any suppliers, and @status is RPM_ACTIVE, they will be activated
 * upfront and if the activation of one of them fails, the status of @dev will
 * be changed to RPM_SUSPENDED (instead of setting it to RPM_ACTIVE) and the
 * error returned by the failing supplier activation will be returned in that
 * case.
 */
In __pm_runtime_set_status():

    struct device *parent = dev->parent;
    bool notify_parent = false;
    int error = 0;

    if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
        return -EINVAL;

    spin_lock_irq(&dev->power.lock);

    /*
     * Prevent PM-runtime from being enabled for the device or return an
     * error if it is enabled already and working.
     */
    if (dev->power.runtime_error || dev->power.disable_depth)
        dev->power.disable_depth++;
    else
        error = -EAGAIN;

    spin_unlock_irq(&dev->power.lock);

    if (error)
        return error;
    ...
    spin_lock_irq(&dev->power.lock);

    if (dev->power.runtime_status == status || !parent)
        goto out_set;

    if (status == RPM_SUSPENDED) {
        atomic_add_unless(&parent->power.child_count, -1, 0);
        notify_parent = !parent->power.ignore_children;
    } else {
        spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

        /*
         * It is invalid to put an active child under a parent that is
         * not active, has runtime PM enabled and the
         * 'power.ignore_children' flag unset.
         */
        if (!parent->power.disable_depth &&
            !parent->power.ignore_children &&
            parent->power.runtime_status != RPM_ACTIVE) {
            ...
            error = -EBUSY;
        } else if (dev->power.runtime_status == RPM_SUSPENDED) {
            atomic_inc(&parent->power.child_count);
        }

        spin_unlock(&parent->power.lock);
        ...
    }
 out_set:
    __update_runtime_status(dev, status);
    if (!error)
        dev->power.runtime_error = 0;
 out:
    spin_unlock_irq(&dev->power.lock);
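Drivers normally reach this through the pm_runtime_set_active()/pm_runtime_set_suspended() wrappers, to declare the initial hardware state before runtime PM is enabled; a common probe-time sketch:

    /* The firmware/bus code left the hardware powered on: */
    pm_runtime_set_active(dev);
    pm_runtime_enable(dev);

If the status call is skipped, the device stays in its default RPM_SUSPENDED state (see pm_runtime_init() below), and the first pm_runtime_get_sync() will invoke ->runtime_resume() on already-powered hardware.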
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
In __pm_runtime_barrier():

    pm_runtime_deactivate_timer(dev);

    if (dev->power.request_pending) {
        dev->power.request = RPM_REQ_NONE;
        spin_unlock_irq(&dev->power.lock);

        cancel_work_sync(&dev->power.work);

        spin_lock_irq(&dev->power.lock);
        dev->power.request_pending = false;
    }

    if (dev->power.runtime_status == RPM_SUSPENDING ||
        dev->power.runtime_status == RPM_RESUMING ||
        dev->power.idle_notification) {
        DEFINE_WAIT(wait);

        /* Suspend, wake-up or idle notification in progress. */
        for (;;) {
            prepare_to_wait(&dev->power.wait_queue, &wait,
                            TASK_UNINTERRUPTIBLE);
            if (dev->power.runtime_status != RPM_SUSPENDING &&
                dev->power.runtime_status != RPM_RESUMING &&
                !dev->power.idle_notification)
                break;
            spin_unlock_irq(&dev->power.lock);

            schedule();

            spin_lock_irq(&dev->power.lock);
        }
        finish_wait(&dev->power.wait_queue, &wait);
    }
/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
In pm_runtime_barrier():

    pm_runtime_get_noresume(dev);
    spin_lock_irq(&dev->power.lock);

    if (dev->power.request_pending &&
        dev->power.request == RPM_REQ_RESUME) {
        rpm_resume(dev, 0);
        retval = 1;
    }

    __pm_runtime_barrier(dev);

    spin_unlock_irq(&dev->power.lock);
    pm_runtime_put_noidle(dev);
/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 */
In __pm_runtime_disable():

    spin_lock_irq(&dev->power.lock);

    if (dev->power.disable_depth > 0) {
        dev->power.disable_depth++;
        goto out;
    }

    /*
     * Wake up the device if there's a resume request pending, because that
     * means there probably is some I/O to process and disabling runtime PM
     * shouldn't prevent the device from processing the I/O.
     */
    if (check_resume && dev->power.request_pending &&
        dev->power.request == RPM_REQ_RESUME) {
        /*
         * Prevent suspends and idle notifications from being carried
         * out after we have woken up the device.
         */
        pm_runtime_get_noresume(dev);

        rpm_resume(dev, 0);

        pm_runtime_put_noidle(dev);
    }

    /* Update time accounting before disabling PM-runtime. */
    update_pm_runtime_accounting(dev);

    if (!dev->power.disable_depth++)
        __pm_runtime_barrier(dev);

 out:
    spin_unlock_irq(&dev->power.lock);
/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
In pm_runtime_enable():

    spin_lock_irqsave(&dev->power.lock, flags);

    if (dev->power.disable_depth > 0) {
        dev->power.disable_depth--;

        /* About to enable runtime PM, set accounting_timestamp to now. */
        if (!dev->power.disable_depth)
            dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
    } else {
        dev_warn(dev, "Unbalanced %s!\n", __func__);
    }

    WARN(!dev->power.disable_depth &&
         dev->power.runtime_status == RPM_SUSPENDED &&
         !dev->power.ignore_children &&
         atomic_read(&dev->power.child_count) > 0,
         "Enabling runtime PM for inactive device (%s) with active children\n",
         dev_name(dev));

    spin_unlock_irqrestore(&dev->power.lock, flags);
/**
 * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
 * @dev: Device to handle.
 */
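With the devres variant, the matching pm_runtime_disable() is registered as a devres action and runs automatically when the driver unbinds; a sketch (foo_probe is hypothetical):

    static int foo_probe(struct platform_device *pdev)
    {
        int ret;

        /* Undone automatically when the driver unbinds. */
        ret = devm_pm_runtime_enable(&pdev->dev);
        if (ret)
            return ret;

        return 0;
    }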
/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
In pm_runtime_forbid():

    spin_lock_irq(&dev->power.lock);
    if (!dev->power.runtime_auto)
        goto out;

    dev->power.runtime_auto = false;
    atomic_inc(&dev->power.usage_count);
    rpm_resume(dev, 0);

 out:
    spin_unlock_irq(&dev->power.lock);
/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
In pm_runtime_allow():

    spin_lock_irq(&dev->power.lock);
    if (dev->power.runtime_auto)
        goto out;

    dev->power.runtime_auto = true;
    if (atomic_dec_and_test(&dev->power.usage_count))
        rpm_idle(dev, RPM_AUTO | RPM_ASYNC);

 out:
    spin_unlock_irq(&dev->power.lock);
/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
In pm_runtime_no_callbacks():

    spin_lock_irq(&dev->power.lock);
    dev->power.no_callbacks = 1;
    spin_unlock_irq(&dev->power.lock);
/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle.
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
In pm_runtime_irq_safe():

    if (dev->parent)
        pm_runtime_get_sync(dev->parent);
    spin_lock_irq(&dev->power.lock);
    dev->power.irq_safe = 1;
    spin_unlock_irq(&dev->power.lock);
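A driver whose suspend/resume callbacks can run with interrupts off opts in once, typically at probe, and may then call the synchronous helpers from hard-irq context; a sketch (foo_irq is hypothetical):

    /* At probe time: */
    pm_runtime_irq_safe(dev);

    static irqreturn_t foo_irq(int irq, void *data)
    {
        struct device *dev = data;

        pm_runtime_get_sync(dev);   /* valid here only because irq_safe is set */
        /* ... service the interrupt ... */
        pm_runtime_put(dev);

        return IRQ_HANDLED;
    }

The price, as the comment above notes, is that the parent is kept permanently resumed.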
/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * enabled, or allow it if it is no longer enabled and it used to be prevented.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
In update_autosuspend():

    int delay = dev->power.autosuspend_delay;

    /* Should runtime suspend be prevented now? */
    if (dev->power.use_autosuspend && delay < 0) {

        /* If it used to be allowed then prevent it. */
        if (!old_use || old_delay >= 0) {
            atomic_inc(&dev->power.usage_count);
            rpm_resume(dev, 0);
        }
    }

    /* Runtime suspend is allowed now. */
    else {

        /* If it used to be prevented then allow it. */
        if (old_use && old_delay < 0)
            atomic_dec(&dev->power.usage_count);

        /* Maybe we can autosuspend now. */
        rpm_idle(dev, RPM_AUTO);
    }
/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
In pm_runtime_set_autosuspend_delay():

    spin_lock_irq(&dev->power.lock);
    old_delay = dev->power.autosuspend_delay;
    old_use = dev->power.use_autosuspend;
    dev->power.autosuspend_delay = delay;
    update_autosuspend(dev, old_delay, old_use);
    spin_unlock_irq(&dev->power.lock);
/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
In __pm_runtime_use_autosuspend():

    spin_lock_irq(&dev->power.lock);
    old_delay = dev->power.autosuspend_delay;
    old_use = dev->power.use_autosuspend;
    dev->power.use_autosuspend = use;
    update_autosuspend(dev, old_delay, old_use);
    spin_unlock_irq(&dev->power.lock);
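Probe code usually pairs the two setters and then relies on the expiration logic of pm_runtime_autosuspend_expiration() above; a sketch:

    /* Allow autosuspend with a 2 s inactivity window: */
    pm_runtime_set_autosuspend_delay(dev, 2000);
    pm_runtime_use_autosuspend(dev);
    pm_runtime_enable(dev);

pm_runtime_use_autosuspend(dev) is the conventional wrapper for __pm_runtime_use_autosuspend(dev, true).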
/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
In pm_runtime_init():

    dev->power.runtime_status = RPM_SUSPENDED;
    dev->power.idle_notification = false;

    dev->power.disable_depth = 1;
    atomic_set(&dev->power.usage_count, 0);

    dev->power.runtime_error = 0;

    atomic_set(&dev->power.child_count, 0);
    pm_suspend_ignore_children(dev, false);
    dev->power.runtime_auto = true;

    dev->power.request_pending = false;
    dev->power.request = RPM_REQ_NONE;
    dev->power.deferred_resume = false;
    dev->power.needs_force_resume = 0;
    INIT_WORK(&dev->power.work, pm_runtime_work);

    dev->power.timer_expires = 0;
    hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    dev->power.suspend_timer.function = pm_suspend_timer_fn;

    init_waitqueue_head(&dev->power.wait_queue);
/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
In pm_runtime_reinit():

    if (!pm_runtime_enabled(dev)) {
        if (dev->power.runtime_status == RPM_ACTIVE)
            pm_runtime_set_suspended(dev);
        if (dev->power.irq_safe) {
            spin_lock_irq(&dev->power.lock);
            dev->power.irq_safe = 0;
            spin_unlock_irq(&dev->power.lock);
            if (dev->parent)
                pm_runtime_put(dev->parent);
        }
    }
/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
In pm_runtime_get_suppliers():

    list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                            device_links_read_lock_held())
        if (link->flags & DL_FLAG_PM_RUNTIME) {
            link->supplier_preactivated = true;
            pm_runtime_get_sync(link->supplier);
            refcount_inc(&link->rpm_active);
        }
/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
In pm_runtime_put_suppliers():

    list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                            device_links_read_lock_held())
        if (link->supplier_preactivated) {
            bool put;

            link->supplier_preactivated = false;

            spin_lock_irqsave(&dev->power.lock, flags);

            put = pm_runtime_status_suspended(dev) &&
                  refcount_dec_not_one(&link->rpm_active);

            spin_unlock_irqrestore(&dev->power.lock, flags);

            if (put)
                pm_runtime_put(link->supplier);
        }
In pm_runtime_new_link():

    spin_lock_irq(&dev->power.lock);
    dev->power.links_count++;
    spin_unlock_irq(&dev->power.lock);

In pm_runtime_drop_link_count():

    spin_lock_irq(&dev->power.lock);
    WARN_ON(dev->power.links_count == 0);
    dev->power.links_count--;
    spin_unlock_irq(&dev->power.lock);
/**
 * pm_runtime_drop_link - Prepare for device link removal.
 * @link: Device link going away.
 *
 * Drop the link count of the consumer end of @link and decrement the supplier
 * device's runtime PM usage counter as many times as needed to drop all of the
 * PM runtime references to it from the consumer.
 */
In pm_runtime_drop_link():

    if (!(link->flags & DL_FLAG_PM_RUNTIME))
        return;

    pm_runtime_drop_link_count(link->consumer);
    pm_runtime_release_supplier(link);
    pm_request_idle(link->supplier);
In pm_runtime_need_not_resume():

    return atomic_read(&dev->power.usage_count) <= 1 &&
           (atomic_read(&dev->power.child_count) == 0 ||
            dev->power.ignore_children);
/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we safely can check the device's runtime PM status and
 * if it is active, invoke its ->runtime_suspend callback to suspend it and
 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
 * usage and children counters don't indicate that the device was in use before
 * the system-wide transition under way, decrement its parent's children counter
 * (if there is a parent).
 *
 * Typically this function may be invoked from a system suspend callback to make
 * sure the device is put into low power state and it should only be used during
 * system-wide PM transitions to sleep states.  It assumes that the analogous
 * pm_runtime_force_resume() will be used to resume the device.
 */
In pm_runtime_force_suspend():

    /*
     * If the device can stay in suspend after the system-wide transition
     * to the working state that will follow, drop the children counter of
     * its parent, but set its status to RPM_SUSPENDED anyway in case this
     * function will be called again for it in the meantime.
     */
    if (pm_runtime_need_not_resume(dev)) {
        pm_runtime_set_suspended(dev);
    } else {
        __update_runtime_status(dev, RPM_SUSPENDED);
        dev->power.needs_force_resume = 1;
    }
/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function, the device is expected to have been put
 * into a low power state by pm_runtime_force_suspend().  This function
 * reverses that: if the device was in use before the preceding system-wide
 * transition, it is resumed here, which is why the function should only be
 * used on system resume.  In the other case, we defer the resume to be managed
 * via runtime PM.
 */
In pm_runtime_force_resume():

    if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
        goto out;
    ...
    dev->power.needs_force_resume = 0;
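The intended pairing is as the system sleep callbacks of a driver that is otherwise runtime-PM managed; many drivers plug the two helpers in directly (foo_runtime_* are the hypothetical runtime callbacks from the earlier sketch):

    static const struct dev_pm_ops foo_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };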