Lines matching refs:dev in drivers/base/power/runtime.c (the runtime PM core)

22 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)  in __rpm_get_callback()  argument
27 if (dev->pm_domain) in __rpm_get_callback()
28 ops = &dev->pm_domain->ops; in __rpm_get_callback()
29 else if (dev->type && dev->type->pm) in __rpm_get_callback()
30 ops = dev->type->pm; in __rpm_get_callback()
31 else if (dev->class && dev->class->pm) in __rpm_get_callback()
32 ops = dev->class->pm; in __rpm_get_callback()
33 else if (dev->bus && dev->bus->pm) in __rpm_get_callback()
34 ops = dev->bus->pm; in __rpm_get_callback()
43 if (!cb && dev->driver && dev->driver->pm) in __rpm_get_callback()
44 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset); in __rpm_get_callback()
49 #define RPM_GET_CALLBACK(dev, callback) \ argument
50 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
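The offsetof() lookup above picks one member out of whichever struct dev_pm_ops the power domain, device type, class, bus, or driver has registered. A minimal sketch of the driver side it selects from, assuming the usual SET_RUNTIME_PM_OPS() helper; all foo_* names are hypothetical:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
        /* hypothetical: quiesce the hardware (see the fuller sketches below) */
        return 0;
}

static int foo_runtime_resume(struct device *dev)
{
        /* hypothetical: bring the hardware back up */
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        /* third argument is the optional runtime_idle callback */
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};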
52 static int rpm_resume(struct device *dev, int rpmflags);
53 static int rpm_suspend(struct device *dev, int rpmflags);
66 static void update_pm_runtime_accounting(struct device *dev) in update_pm_runtime_accounting() argument
70 if (dev->power.disable_depth > 0) in update_pm_runtime_accounting()
73 last = dev->power.accounting_timestamp; in update_pm_runtime_accounting()
76 dev->power.accounting_timestamp = now; in update_pm_runtime_accounting()
88 if (dev->power.runtime_status == RPM_SUSPENDED) in update_pm_runtime_accounting()
89 dev->power.suspended_time += delta; in update_pm_runtime_accounting()
91 dev->power.active_time += delta; in update_pm_runtime_accounting()
94 static void __update_runtime_status(struct device *dev, enum rpm_status status) in __update_runtime_status() argument
96 update_pm_runtime_accounting(dev); in __update_runtime_status()
97 trace_rpm_status(dev, status); in __update_runtime_status()
98 dev->power.runtime_status = status; in __update_runtime_status()
101 static u64 rpm_get_accounted_time(struct device *dev, bool suspended) in rpm_get_accounted_time() argument
106 spin_lock_irqsave(&dev->power.lock, flags); in rpm_get_accounted_time()
108 update_pm_runtime_accounting(dev); in rpm_get_accounted_time()
109 time = suspended ? dev->power.suspended_time : dev->power.active_time; in rpm_get_accounted_time()
111 spin_unlock_irqrestore(&dev->power.lock, flags); in rpm_get_accounted_time()
116 u64 pm_runtime_active_time(struct device *dev) in pm_runtime_active_time() argument
118 return rpm_get_accounted_time(dev, false); in pm_runtime_active_time()
121 u64 pm_runtime_suspended_time(struct device *dev) in pm_runtime_suspended_time() argument
123 return rpm_get_accounted_time(dev, true); in pm_runtime_suspended_time()
131 static void pm_runtime_deactivate_timer(struct device *dev) in pm_runtime_deactivate_timer() argument
133 if (dev->power.timer_expires > 0) { in pm_runtime_deactivate_timer()
134 hrtimer_try_to_cancel(&dev->power.suspend_timer); in pm_runtime_deactivate_timer()
135 dev->power.timer_expires = 0; in pm_runtime_deactivate_timer()
143 static void pm_runtime_cancel_pending(struct device *dev) in pm_runtime_cancel_pending() argument
145 pm_runtime_deactivate_timer(dev); in pm_runtime_cancel_pending()
150 dev->power.request = RPM_REQ_NONE; in pm_runtime_cancel_pending()
165 u64 pm_runtime_autosuspend_expiration(struct device *dev) in pm_runtime_autosuspend_expiration() argument
170 if (!dev->power.use_autosuspend) in pm_runtime_autosuspend_expiration()
173 autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); in pm_runtime_autosuspend_expiration()
177 expires = READ_ONCE(dev->power.last_busy); in pm_runtime_autosuspend_expiration()
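Taken together, the fragments above amount to the following arithmetic (a sketch, assuming use_autosuspend is set and the delay is non-negative): the earliest permitted suspend time is the last_busy timestamp plus the autosuspend delay converted from milliseconds to nanoseconds, compared against the same monotonic fast clock the accounting code uses.

        u64 expires = READ_ONCE(dev->power.last_busy) +
                      (u64)READ_ONCE(dev->power.autosuspend_delay) * NSEC_PER_MSEC;

        /* 0 means "no future expiration", i.e. suspending now is fine */
        return expires > ktime_get_mono_fast_ns() ? expires : 0;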
186 static int dev_memalloc_noio(struct device *dev, void *data) in dev_memalloc_noio() argument
188 return dev->power.memalloc_noio; in dev_memalloc_noio()
219 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable) in pm_runtime_set_memalloc_noio() argument
228 spin_lock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
229 enabled = dev->power.memalloc_noio; in pm_runtime_set_memalloc_noio()
230 dev->power.memalloc_noio = enable; in pm_runtime_set_memalloc_noio()
231 spin_unlock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
240 dev = dev->parent; in pm_runtime_set_memalloc_noio()
247 if (!dev || (!enable && in pm_runtime_set_memalloc_noio()
248 device_for_each_child(dev, NULL, dev_memalloc_noio))) in pm_runtime_set_memalloc_noio()
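Usage sketch (hypothetical foo_probe): a driver for a device in the block I/O path marks itself, and through the parent walk above its ancestors, so that runtime PM callbacks run with GFP_NOIO allocation semantics and memory reclaim cannot recurse into the device being suspended or resumed.

static int foo_probe(struct device *dev)
{
        /* hypothetical probe: flag propagates up the ancestry; cleared with
         * pm_runtime_set_memalloc_noio(dev, false) on removal */
        pm_runtime_set_memalloc_noio(dev, true);
        return 0;
}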
259 static int rpm_check_suspend_allowed(struct device *dev) in rpm_check_suspend_allowed() argument
263 if (dev->power.runtime_error) in rpm_check_suspend_allowed()
265 else if (dev->power.disable_depth > 0) in rpm_check_suspend_allowed()
267 else if (atomic_read(&dev->power.usage_count)) in rpm_check_suspend_allowed()
269 else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count)) in rpm_check_suspend_allowed()
273 else if ((dev->power.deferred_resume && in rpm_check_suspend_allowed()
274 dev->power.runtime_status == RPM_SUSPENDING) || in rpm_check_suspend_allowed()
275 (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME)) in rpm_check_suspend_allowed()
277 else if (__dev_pm_qos_resume_latency(dev) == 0) in rpm_check_suspend_allowed()
279 else if (dev->power.runtime_status == RPM_SUSPENDED) in rpm_check_suspend_allowed()
285 static int rpm_get_suppliers(struct device *dev) in rpm_get_suppliers() argument
289 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in rpm_get_suppliers()
328 static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend) in __rpm_put_suppliers() argument
332 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in __rpm_put_suppliers()
340 static void rpm_put_suppliers(struct device *dev) in rpm_put_suppliers() argument
342 __rpm_put_suppliers(dev, true); in rpm_put_suppliers()
345 static void rpm_suspend_suppliers(struct device *dev) in rpm_suspend_suppliers() argument
350 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in rpm_suspend_suppliers()
362 static int __rpm_callback(int (*cb)(struct device *), struct device *dev) in __rpm_callback() argument
363 __releases(&dev->power.lock) __acquires(&dev->power.lock) in __rpm_callback()
366 bool use_links = dev->power.links_count > 0; in __rpm_callback()
368 if (dev->power.irq_safe) { in __rpm_callback()
369 spin_unlock(&dev->power.lock); in __rpm_callback()
371 spin_unlock_irq(&dev->power.lock); in __rpm_callback()
380 if (use_links && dev->power.runtime_status == RPM_RESUMING) { in __rpm_callback()
383 retval = rpm_get_suppliers(dev); in __rpm_callback()
385 rpm_put_suppliers(dev); in __rpm_callback()
394 retval = cb(dev); in __rpm_callback()
396 if (dev->power.irq_safe) { in __rpm_callback()
397 spin_lock(&dev->power.lock); in __rpm_callback()
407 ((dev->power.runtime_status == RPM_SUSPENDING && !retval) || in __rpm_callback()
408 (dev->power.runtime_status == RPM_RESUMING && retval))) { in __rpm_callback()
411 __rpm_put_suppliers(dev, false); in __rpm_callback()
417 spin_lock_irq(&dev->power.lock); in __rpm_callback()
428 static int rpm_callback(int (*cb)(struct device *), struct device *dev) in rpm_callback() argument
432 if (dev->power.memalloc_noio) { in rpm_callback()
445 retval = __rpm_callback(cb, dev); in rpm_callback()
448 retval = __rpm_callback(cb, dev); in rpm_callback()
451 dev->power.runtime_error = retval; in rpm_callback()
468 static int rpm_idle(struct device *dev, int rpmflags) in rpm_idle() argument
473 trace_rpm_idle(dev, rpmflags); in rpm_idle()
474 retval = rpm_check_suspend_allowed(dev); in rpm_idle()
479 else if (dev->power.runtime_status != RPM_ACTIVE) in rpm_idle()
486 else if (dev->power.request_pending && in rpm_idle()
487 dev->power.request > RPM_REQ_IDLE) in rpm_idle()
491 else if (dev->power.idle_notification) in rpm_idle()
498 dev->power.request = RPM_REQ_NONE; in rpm_idle()
500 callback = RPM_GET_CALLBACK(dev, runtime_idle); in rpm_idle()
503 if (!callback || dev->power.no_callbacks) in rpm_idle()
508 dev->power.request = RPM_REQ_IDLE; in rpm_idle()
509 if (!dev->power.request_pending) { in rpm_idle()
510 dev->power.request_pending = true; in rpm_idle()
511 queue_work(pm_wq, &dev->power.work); in rpm_idle()
513 trace_rpm_return_int(dev, _THIS_IP_, 0); in rpm_idle()
517 dev->power.idle_notification = true; in rpm_idle()
519 if (dev->power.irq_safe) in rpm_idle()
520 spin_unlock(&dev->power.lock); in rpm_idle()
522 spin_unlock_irq(&dev->power.lock); in rpm_idle()
524 retval = callback(dev); in rpm_idle()
526 if (dev->power.irq_safe) in rpm_idle()
527 spin_lock(&dev->power.lock); in rpm_idle()
529 spin_lock_irq(&dev->power.lock); in rpm_idle()
531 dev->power.idle_notification = false; in rpm_idle()
532 wake_up_all(&dev->power.wait_queue); in rpm_idle()
535 trace_rpm_return_int(dev, _THIS_IP_, retval); in rpm_idle()
536 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO); in rpm_idle()
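As the final return shows, a runtime_idle callback that returns 0 lets the core continue into rpm_suspend() with RPM_AUTO, while a non-zero return (commonly -EBUSY) vetoes the suspend. A hedged driver-side sketch; struct foo_device and transfer_in_flight are hypothetical:

static int foo_runtime_idle(struct device *dev)
{
        struct foo_device *foo = dev_get_drvdata(dev);  /* hypothetical */

        /* veto while work is pending, otherwise allow autosuspend */
        return foo->transfer_in_flight ? -EBUSY : 0;
}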
560 static int rpm_suspend(struct device *dev, int rpmflags) in rpm_suspend() argument
561 __releases(&dev->power.lock) __acquires(&dev->power.lock) in rpm_suspend()
567 trace_rpm_suspend(dev, rpmflags); in rpm_suspend()
570 retval = rpm_check_suspend_allowed(dev); in rpm_suspend()
575 if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC)) in rpm_suspend()
582 if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) { in rpm_suspend()
583 u64 expires = pm_runtime_autosuspend_expiration(dev); in rpm_suspend()
587 dev->power.request = RPM_REQ_NONE; in rpm_suspend()
596 if (!(dev->power.timer_expires && in rpm_suspend()
597 dev->power.timer_expires <= expires)) { in rpm_suspend()
602 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) * in rpm_suspend()
605 dev->power.timer_expires = expires; in rpm_suspend()
606 hrtimer_start_range_ns(&dev->power.suspend_timer, in rpm_suspend()
611 dev->power.timer_autosuspends = 1; in rpm_suspend()
617 pm_runtime_cancel_pending(dev); in rpm_suspend()
619 if (dev->power.runtime_status == RPM_SUSPENDING) { in rpm_suspend()
627 if (dev->power.irq_safe) { in rpm_suspend()
628 spin_unlock(&dev->power.lock); in rpm_suspend()
632 spin_lock(&dev->power.lock); in rpm_suspend()
638 prepare_to_wait(&dev->power.wait_queue, &wait, in rpm_suspend()
640 if (dev->power.runtime_status != RPM_SUSPENDING) in rpm_suspend()
643 spin_unlock_irq(&dev->power.lock); in rpm_suspend()
647 spin_lock_irq(&dev->power.lock); in rpm_suspend()
649 finish_wait(&dev->power.wait_queue, &wait); in rpm_suspend()
653 if (dev->power.no_callbacks) in rpm_suspend()
658 dev->power.request = (rpmflags & RPM_AUTO) ? in rpm_suspend()
660 if (!dev->power.request_pending) { in rpm_suspend()
661 dev->power.request_pending = true; in rpm_suspend()
662 queue_work(pm_wq, &dev->power.work); in rpm_suspend()
667 __update_runtime_status(dev, RPM_SUSPENDING); in rpm_suspend()
669 callback = RPM_GET_CALLBACK(dev, runtime_suspend); in rpm_suspend()
671 dev_pm_enable_wake_irq_check(dev, true); in rpm_suspend()
672 retval = rpm_callback(callback, dev); in rpm_suspend()
676 dev_pm_enable_wake_irq_complete(dev); in rpm_suspend()
679 __update_runtime_status(dev, RPM_SUSPENDED); in rpm_suspend()
680 pm_runtime_deactivate_timer(dev); in rpm_suspend()
682 if (dev->parent) { in rpm_suspend()
683 parent = dev->parent; in rpm_suspend()
686 wake_up_all(&dev->power.wait_queue); in rpm_suspend()
688 if (dev->power.deferred_resume) { in rpm_suspend()
689 dev->power.deferred_resume = false; in rpm_suspend()
690 rpm_resume(dev, 0); in rpm_suspend()
695 if (dev->power.irq_safe) in rpm_suspend()
700 spin_unlock(&dev->power.lock); in rpm_suspend()
706 spin_lock(&dev->power.lock); in rpm_suspend()
709 if (dev->power.links_count > 0) { in rpm_suspend()
710 spin_unlock_irq(&dev->power.lock); in rpm_suspend()
712 rpm_suspend_suppliers(dev); in rpm_suspend()
714 spin_lock_irq(&dev->power.lock); in rpm_suspend()
718 trace_rpm_return_int(dev, _THIS_IP_, retval); in rpm_suspend()
723 dev_pm_disable_wake_irq_check(dev, true); in rpm_suspend()
724 __update_runtime_status(dev, RPM_ACTIVE); in rpm_suspend()
725 dev->power.deferred_resume = false; in rpm_suspend()
726 wake_up_all(&dev->power.wait_queue); in rpm_suspend()
729 dev->power.runtime_error = 0; in rpm_suspend()
738 pm_runtime_autosuspend_expiration(dev) != 0) in rpm_suspend()
741 pm_runtime_cancel_pending(dev); in rpm_suspend()
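The failure path above (wake IRQ disabled again, status rolled back to RPM_ACTIVE) is what a driver's runtime_suspend callback triggers by returning a negative errno. A sketch of such a callback; foo_save_context() and the clk field are hypothetical:

static int foo_runtime_suspend(struct device *dev)
{
        struct foo_device *foo = dev_get_drvdata(dev);  /* hypothetical */
        int ret;

        ret = foo_save_context(foo);            /* hypothetical helper */
        if (ret)
                return ret;     /* core keeps the device RPM_ACTIVE */

        clk_disable_unprepare(foo->clk);
        return 0;
}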
763 static int rpm_resume(struct device *dev, int rpmflags) in rpm_resume() argument
764 __releases(&dev->power.lock) __acquires(&dev->power.lock) in rpm_resume()
770 trace_rpm_resume(dev, rpmflags); in rpm_resume()
773 if (dev->power.runtime_error) { in rpm_resume()
775 } else if (dev->power.disable_depth > 0) { in rpm_resume()
776 if (dev->power.runtime_status == RPM_ACTIVE && in rpm_resume()
777 dev->power.last_status == RPM_ACTIVE) in rpm_resume()
791 dev->power.request = RPM_REQ_NONE; in rpm_resume()
792 if (!dev->power.timer_autosuspends) in rpm_resume()
793 pm_runtime_deactivate_timer(dev); in rpm_resume()
795 if (dev->power.runtime_status == RPM_ACTIVE) { in rpm_resume()
800 if (dev->power.runtime_status == RPM_RESUMING || in rpm_resume()
801 dev->power.runtime_status == RPM_SUSPENDING) { in rpm_resume()
805 if (dev->power.runtime_status == RPM_SUSPENDING) { in rpm_resume()
806 dev->power.deferred_resume = true; in rpm_resume()
815 if (dev->power.irq_safe) { in rpm_resume()
816 spin_unlock(&dev->power.lock); in rpm_resume()
820 spin_lock(&dev->power.lock); in rpm_resume()
826 prepare_to_wait(&dev->power.wait_queue, &wait, in rpm_resume()
828 if (dev->power.runtime_status != RPM_RESUMING && in rpm_resume()
829 dev->power.runtime_status != RPM_SUSPENDING) in rpm_resume()
832 spin_unlock_irq(&dev->power.lock); in rpm_resume()
836 spin_lock_irq(&dev->power.lock); in rpm_resume()
838 finish_wait(&dev->power.wait_queue, &wait); in rpm_resume()
847 if (dev->power.no_callbacks && !parent && dev->parent) { in rpm_resume()
848 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING); in rpm_resume()
849 if (dev->parent->power.disable_depth > 0 || in rpm_resume()
850 dev->parent->power.ignore_children || in rpm_resume()
851 dev->parent->power.runtime_status == RPM_ACTIVE) { in rpm_resume()
852 atomic_inc(&dev->parent->power.child_count); in rpm_resume()
853 spin_unlock(&dev->parent->power.lock); in rpm_resume()
857 spin_unlock(&dev->parent->power.lock); in rpm_resume()
862 dev->power.request = RPM_REQ_RESUME; in rpm_resume()
863 if (!dev->power.request_pending) { in rpm_resume()
864 dev->power.request_pending = true; in rpm_resume()
865 queue_work(pm_wq, &dev->power.work); in rpm_resume()
871 if (!parent && dev->parent) { in rpm_resume()
877 parent = dev->parent; in rpm_resume()
878 if (dev->power.irq_safe) in rpm_resume()
881 spin_unlock(&dev->power.lock); in rpm_resume()
898 spin_lock(&dev->power.lock); in rpm_resume()
906 if (dev->power.no_callbacks) in rpm_resume()
909 __update_runtime_status(dev, RPM_RESUMING); in rpm_resume()
911 callback = RPM_GET_CALLBACK(dev, runtime_resume); in rpm_resume()
913 dev_pm_disable_wake_irq_check(dev, false); in rpm_resume()
914 retval = rpm_callback(callback, dev); in rpm_resume()
916 __update_runtime_status(dev, RPM_SUSPENDED); in rpm_resume()
917 pm_runtime_cancel_pending(dev); in rpm_resume()
918 dev_pm_enable_wake_irq_check(dev, false); in rpm_resume()
921 __update_runtime_status(dev, RPM_ACTIVE); in rpm_resume()
922 pm_runtime_mark_last_busy(dev); in rpm_resume()
926 wake_up_all(&dev->power.wait_queue); in rpm_resume()
929 rpm_idle(dev, RPM_ASYNC); in rpm_resume()
932 if (parent && !dev->power.irq_safe) { in rpm_resume()
933 spin_unlock_irq(&dev->power.lock); in rpm_resume()
937 spin_lock_irq(&dev->power.lock); in rpm_resume()
940 trace_rpm_return_int(dev, _THIS_IP_, retval); in rpm_resume()
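The mirror-image callback: on failure rpm_resume() sets the status back to RPM_SUSPENDED and records the error, so a runtime_resume implementation should undo any partial work before returning. Sketch, with foo_restore_context() hypothetical:

static int foo_runtime_resume(struct device *dev)
{
        struct foo_device *foo = dev_get_drvdata(dev);  /* hypothetical */
        int ret;

        ret = clk_prepare_enable(foo->clk);
        if (ret)
                return ret;     /* core marks the device RPM_SUSPENDED again */

        foo_restore_context(foo);               /* hypothetical helper */
        return 0;
}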
954 struct device *dev = container_of(work, struct device, power.work); in pm_runtime_work() local
957 spin_lock_irq(&dev->power.lock); in pm_runtime_work()
959 if (!dev->power.request_pending) in pm_runtime_work()
962 req = dev->power.request; in pm_runtime_work()
963 dev->power.request = RPM_REQ_NONE; in pm_runtime_work()
964 dev->power.request_pending = false; in pm_runtime_work()
970 rpm_idle(dev, RPM_NOWAIT); in pm_runtime_work()
973 rpm_suspend(dev, RPM_NOWAIT); in pm_runtime_work()
976 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO); in pm_runtime_work()
979 rpm_resume(dev, RPM_NOWAIT); in pm_runtime_work()
984 spin_unlock_irq(&dev->power.lock); in pm_runtime_work()
995 struct device *dev = container_of(timer, struct device, power.suspend_timer); in pm_suspend_timer_fn() local
999 spin_lock_irqsave(&dev->power.lock, flags); in pm_suspend_timer_fn()
1001 expires = dev->power.timer_expires; in pm_suspend_timer_fn()
1007 dev->power.timer_expires = 0; in pm_suspend_timer_fn()
1008 rpm_suspend(dev, dev->power.timer_autosuspends ? in pm_suspend_timer_fn()
1012 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_suspend_timer_fn()
1022 int pm_schedule_suspend(struct device *dev, unsigned int delay) in pm_schedule_suspend() argument
1028 spin_lock_irqsave(&dev->power.lock, flags); in pm_schedule_suspend()
1031 retval = rpm_suspend(dev, RPM_ASYNC); in pm_schedule_suspend()
1035 retval = rpm_check_suspend_allowed(dev); in pm_schedule_suspend()
1040 pm_runtime_cancel_pending(dev); in pm_schedule_suspend()
1043 dev->power.timer_expires = expires; in pm_schedule_suspend()
1044 dev->power.timer_autosuspends = 0; in pm_schedule_suspend()
1045 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); in pm_schedule_suspend()
1048 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_schedule_suspend()
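Caller-side sketch: the delay argument is in milliseconds and is turned into an absolute hrtimer expiration above, while a zero delay falls through to an immediate asynchronous suspend request. The foo_defer_suspend() wrapper is hypothetical:

static void foo_defer_suspend(struct device *dev)
{
        /* ask the core to suspend the device roughly half a second from now */
        int ret = pm_schedule_suspend(dev, 500);

        if (ret)
                dev_warn(dev, "runtime suspend not scheduled: %d\n", ret);
}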
1054 static int rpm_drop_usage_count(struct device *dev) in rpm_drop_usage_count() argument
1058 ret = atomic_sub_return(1, &dev->power.usage_count); in rpm_drop_usage_count()
1068 atomic_inc(&dev->power.usage_count); in rpm_drop_usage_count()
1069 dev_warn(dev, "Runtime PM usage count underflow!\n"); in rpm_drop_usage_count()
1086 int __pm_runtime_idle(struct device *dev, int rpmflags) in __pm_runtime_idle() argument
1092 retval = rpm_drop_usage_count(dev); in __pm_runtime_idle()
1096 trace_rpm_usage(dev, rpmflags); in __pm_runtime_idle()
1101 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_idle()
1103 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_idle()
1104 retval = rpm_idle(dev, rpmflags); in __pm_runtime_idle()
1105 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_idle()
1124 int __pm_runtime_suspend(struct device *dev, int rpmflags) in __pm_runtime_suspend() argument
1130 retval = rpm_drop_usage_count(dev); in __pm_runtime_suspend()
1134 trace_rpm_usage(dev, rpmflags); in __pm_runtime_suspend()
1139 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_suspend()
1141 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_suspend()
1142 retval = rpm_suspend(dev, rpmflags); in __pm_runtime_suspend()
1143 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_suspend()
1160 int __pm_runtime_resume(struct device *dev, int rpmflags) in __pm_runtime_resume() argument
1165 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe && in __pm_runtime_resume()
1166 dev->power.runtime_status != RPM_ACTIVE); in __pm_runtime_resume()
1169 atomic_inc(&dev->power.usage_count); in __pm_runtime_resume()
1171 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_resume()
1172 retval = rpm_resume(dev, rpmflags); in __pm_runtime_resume()
1173 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_resume()
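These helpers underpin the usual driver I/O-path pattern: take a usage-count reference and resume synchronously before touching the hardware, then drop the reference through the autosuspend path when done. A hedged sketch; foo_xfer() and foo_do_transfer() are hypothetical:

static int foo_xfer(struct device *dev)
{
        int ret;

        /* bumps the usage count and resumes the device synchronously */
        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;

        ret = foo_do_transfer(dev);             /* hypothetical helper */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return ret;
}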
1203 static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count) in pm_runtime_get_conditional() argument
1208 spin_lock_irqsave(&dev->power.lock, flags); in pm_runtime_get_conditional()
1209 if (dev->power.disable_depth > 0) { in pm_runtime_get_conditional()
1211 } else if (dev->power.runtime_status != RPM_ACTIVE) { in pm_runtime_get_conditional()
1213 } else if (ign_usage_count || (!dev->power.ignore_children && in pm_runtime_get_conditional()
1214 atomic_read(&dev->power.child_count) > 0)) { in pm_runtime_get_conditional()
1216 atomic_inc(&dev->power.usage_count); in pm_runtime_get_conditional()
1218 retval = atomic_inc_not_zero(&dev->power.usage_count); in pm_runtime_get_conditional()
1220 trace_rpm_usage(dev, 0); in pm_runtime_get_conditional()
1221 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_runtime_get_conditional()
1236 int pm_runtime_get_if_active(struct device *dev) in pm_runtime_get_if_active() argument
1238 return pm_runtime_get_conditional(dev, true); in pm_runtime_get_if_active()
1258 int pm_runtime_get_if_in_use(struct device *dev) in pm_runtime_get_if_in_use() argument
1260 return pm_runtime_get_conditional(dev, false); in pm_runtime_get_if_in_use()
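Both wrappers return a positive value only when the reference was actually taken, so code that must not wake the device can use them opportunistically. Sketch, with foo_poll() and foo_read_stats() hypothetical:

static void foo_poll(struct device *dev)
{
        /* only touch registers if the device is already active and in use */
        if (pm_runtime_get_if_in_use(dev) <= 0)
                return;

        foo_read_stats(dev);            /* hypothetical register access */
        pm_runtime_put(dev);
}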
1288 int __pm_runtime_set_status(struct device *dev, unsigned int status) in __pm_runtime_set_status() argument
1290 struct device *parent = dev->parent; in __pm_runtime_set_status()
1298 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_set_status()
1304 if (dev->power.runtime_error || dev->power.disable_depth) in __pm_runtime_set_status()
1305 dev->power.disable_depth++; in __pm_runtime_set_status()
1309 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_set_status()
1323 error = rpm_get_suppliers(dev); in __pm_runtime_set_status()
1330 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_set_status()
1332 if (dev->power.runtime_status == status || !parent) in __pm_runtime_set_status()
1349 dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n", in __pm_runtime_set_status()
1350 dev_name(dev), in __pm_runtime_set_status()
1353 } else if (dev->power.runtime_status == RPM_SUSPENDED) { in __pm_runtime_set_status()
1366 __update_runtime_status(dev, status); in __pm_runtime_set_status()
1368 dev->power.runtime_error = 0; in __pm_runtime_set_status()
1371 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_set_status()
1379 rpm_put_suppliers(dev); in __pm_runtime_set_status()
1384 pm_runtime_enable(dev); in __pm_runtime_set_status()
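Drivers normally reach this function through pm_runtime_set_active()/pm_runtime_set_suspended(). A common probe-time sketch for hardware that is already powered when the driver binds (foo_probe is hypothetical):

static int foo_probe(struct device *dev)
{
        int ret;

        /* tell the core the device is already up before enabling runtime PM */
        ret = pm_runtime_set_active(dev);
        if (ret)
                return ret;

        pm_runtime_enable(dev);
        return 0;
}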
1399 static void __pm_runtime_barrier(struct device *dev) in __pm_runtime_barrier() argument
1401 pm_runtime_deactivate_timer(dev); in __pm_runtime_barrier()
1403 if (dev->power.request_pending) { in __pm_runtime_barrier()
1404 dev->power.request = RPM_REQ_NONE; in __pm_runtime_barrier()
1405 spin_unlock_irq(&dev->power.lock); in __pm_runtime_barrier()
1407 cancel_work_sync(&dev->power.work); in __pm_runtime_barrier()
1409 spin_lock_irq(&dev->power.lock); in __pm_runtime_barrier()
1410 dev->power.request_pending = false; in __pm_runtime_barrier()
1413 if (dev->power.runtime_status == RPM_SUSPENDING || in __pm_runtime_barrier()
1414 dev->power.runtime_status == RPM_RESUMING || in __pm_runtime_barrier()
1415 dev->power.idle_notification) { in __pm_runtime_barrier()
1420 prepare_to_wait(&dev->power.wait_queue, &wait, in __pm_runtime_barrier()
1422 if (dev->power.runtime_status != RPM_SUSPENDING in __pm_runtime_barrier()
1423 && dev->power.runtime_status != RPM_RESUMING in __pm_runtime_barrier()
1424 && !dev->power.idle_notification) in __pm_runtime_barrier()
1426 spin_unlock_irq(&dev->power.lock); in __pm_runtime_barrier()
1430 spin_lock_irq(&dev->power.lock); in __pm_runtime_barrier()
1432 finish_wait(&dev->power.wait_queue, &wait); in __pm_runtime_barrier()
1450 int pm_runtime_barrier(struct device *dev) in pm_runtime_barrier() argument
1454 pm_runtime_get_noresume(dev); in pm_runtime_barrier()
1455 spin_lock_irq(&dev->power.lock); in pm_runtime_barrier()
1457 if (dev->power.request_pending in pm_runtime_barrier()
1458 && dev->power.request == RPM_REQ_RESUME) { in pm_runtime_barrier()
1459 rpm_resume(dev, 0); in pm_runtime_barrier()
1463 __pm_runtime_barrier(dev); in pm_runtime_barrier()
1465 spin_unlock_irq(&dev->power.lock); in pm_runtime_barrier()
1466 pm_runtime_put_noidle(dev); in pm_runtime_barrier()
1486 void __pm_runtime_disable(struct device *dev, bool check_resume) in __pm_runtime_disable() argument
1488 spin_lock_irq(&dev->power.lock); in __pm_runtime_disable()
1490 if (dev->power.disable_depth > 0) { in __pm_runtime_disable()
1491 dev->power.disable_depth++; in __pm_runtime_disable()
1500 if (check_resume && dev->power.request_pending && in __pm_runtime_disable()
1501 dev->power.request == RPM_REQ_RESUME) { in __pm_runtime_disable()
1506 pm_runtime_get_noresume(dev); in __pm_runtime_disable()
1508 rpm_resume(dev, 0); in __pm_runtime_disable()
1510 pm_runtime_put_noidle(dev); in __pm_runtime_disable()
1514 update_pm_runtime_accounting(dev); in __pm_runtime_disable()
1516 if (!dev->power.disable_depth++) { in __pm_runtime_disable()
1517 __pm_runtime_barrier(dev); in __pm_runtime_disable()
1518 dev->power.last_status = dev->power.runtime_status; in __pm_runtime_disable()
1522 spin_unlock_irq(&dev->power.lock); in __pm_runtime_disable()
1530 void pm_runtime_enable(struct device *dev) in pm_runtime_enable() argument
1534 spin_lock_irqsave(&dev->power.lock, flags); in pm_runtime_enable()
1536 if (!dev->power.disable_depth) { in pm_runtime_enable()
1537 dev_warn(dev, "Unbalanced %s!\n", __func__); in pm_runtime_enable()
1541 if (--dev->power.disable_depth > 0) in pm_runtime_enable()
1544 dev->power.last_status = RPM_INVALID; in pm_runtime_enable()
1545 dev->power.accounting_timestamp = ktime_get_mono_fast_ns(); in pm_runtime_enable()
1547 if (dev->power.runtime_status == RPM_SUSPENDED && in pm_runtime_enable()
1548 !dev->power.ignore_children && in pm_runtime_enable()
1549 atomic_read(&dev->power.child_count) > 0) in pm_runtime_enable()
1550 dev_warn(dev, "Enabling runtime PM for inactive device with active children\n"); in pm_runtime_enable()
1553 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_runtime_enable()
1571 int devm_pm_runtime_enable(struct device *dev) in devm_pm_runtime_enable() argument
1573 pm_runtime_enable(dev); in devm_pm_runtime_enable()
1575 return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev); in devm_pm_runtime_enable()
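Usage sketch: because a devres action is registered above, the driver needs no explicit pm_runtime_disable() on its remove or error paths. The foo_platform_probe() wrapper is hypothetical:

static int foo_platform_probe(struct platform_device *pdev)
{
        int ret;

        ret = devm_pm_runtime_enable(&pdev->dev);
        if (ret)
                return ret;

        /* runtime PM is enabled now and disabled automatically on unbind */
        return 0;
}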
1587 void pm_runtime_forbid(struct device *dev) in pm_runtime_forbid() argument
1589 spin_lock_irq(&dev->power.lock); in pm_runtime_forbid()
1590 if (!dev->power.runtime_auto) in pm_runtime_forbid()
1593 dev->power.runtime_auto = false; in pm_runtime_forbid()
1594 atomic_inc(&dev->power.usage_count); in pm_runtime_forbid()
1595 rpm_resume(dev, 0); in pm_runtime_forbid()
1598 spin_unlock_irq(&dev->power.lock); in pm_runtime_forbid()
1608 void pm_runtime_allow(struct device *dev) in pm_runtime_allow() argument
1612 spin_lock_irq(&dev->power.lock); in pm_runtime_allow()
1613 if (dev->power.runtime_auto) in pm_runtime_allow()
1616 dev->power.runtime_auto = true; in pm_runtime_allow()
1617 ret = rpm_drop_usage_count(dev); in pm_runtime_allow()
1619 rpm_idle(dev, RPM_AUTO | RPM_ASYNC); in pm_runtime_allow()
1621 trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC); in pm_runtime_allow()
1624 spin_unlock_irq(&dev->power.lock); in pm_runtime_allow()
1636 void pm_runtime_no_callbacks(struct device *dev) in pm_runtime_no_callbacks() argument
1638 spin_lock_irq(&dev->power.lock); in pm_runtime_no_callbacks()
1639 dev->power.no_callbacks = 1; in pm_runtime_no_callbacks()
1640 spin_unlock_irq(&dev->power.lock); in pm_runtime_no_callbacks()
1641 if (device_is_registered(dev)) in pm_runtime_no_callbacks()
1642 rpm_sysfs_remove(dev); in pm_runtime_no_callbacks()
1657 void pm_runtime_irq_safe(struct device *dev) in pm_runtime_irq_safe() argument
1659 if (dev->parent) in pm_runtime_irq_safe()
1660 pm_runtime_get_sync(dev->parent); in pm_runtime_irq_safe()
1662 spin_lock_irq(&dev->power.lock); in pm_runtime_irq_safe()
1663 dev->power.irq_safe = 1; in pm_runtime_irq_safe()
1664 spin_unlock_irq(&dev->power.lock); in pm_runtime_irq_safe()
1679 static void update_autosuspend(struct device *dev, int old_delay, int old_use) in update_autosuspend() argument
1681 int delay = dev->power.autosuspend_delay; in update_autosuspend()
1684 if (dev->power.use_autosuspend && delay < 0) { in update_autosuspend()
1688 atomic_inc(&dev->power.usage_count); in update_autosuspend()
1689 rpm_resume(dev, 0); in update_autosuspend()
1691 trace_rpm_usage(dev, 0); in update_autosuspend()
1700 atomic_dec(&dev->power.usage_count); in update_autosuspend()
1703 rpm_idle(dev, RPM_AUTO); in update_autosuspend()
1716 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) in pm_runtime_set_autosuspend_delay() argument
1720 spin_lock_irq(&dev->power.lock); in pm_runtime_set_autosuspend_delay()
1721 old_delay = dev->power.autosuspend_delay; in pm_runtime_set_autosuspend_delay()
1722 old_use = dev->power.use_autosuspend; in pm_runtime_set_autosuspend_delay()
1723 dev->power.autosuspend_delay = delay; in pm_runtime_set_autosuspend_delay()
1724 update_autosuspend(dev, old_delay, old_use); in pm_runtime_set_autosuspend_delay()
1725 spin_unlock_irq(&dev->power.lock); in pm_runtime_set_autosuspend_delay()
1737 void __pm_runtime_use_autosuspend(struct device *dev, bool use) in __pm_runtime_use_autosuspend() argument
1741 spin_lock_irq(&dev->power.lock); in __pm_runtime_use_autosuspend()
1742 old_delay = dev->power.autosuspend_delay; in __pm_runtime_use_autosuspend()
1743 old_use = dev->power.use_autosuspend; in __pm_runtime_use_autosuspend()
1744 dev->power.use_autosuspend = use; in __pm_runtime_use_autosuspend()
1745 update_autosuspend(dev, old_delay, old_use); in __pm_runtime_use_autosuspend()
1746 spin_unlock_irq(&dev->power.lock); in __pm_runtime_use_autosuspend()
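Typical probe-time setup pairing these two knobs; the I/O-path counterpart (pm_runtime_mark_last_busy() plus pm_runtime_put_autosuspend()) is sketched after __pm_runtime_resume() above. The foo_setup_autosuspend() helper is hypothetical:

static void foo_setup_autosuspend(struct device *dev)
{
        /* hypothetical probe-time helper: suspend after roughly 2 s of idleness */
        pm_runtime_set_autosuspend_delay(dev, 2000);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);
}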
1754 void pm_runtime_init(struct device *dev) in pm_runtime_init() argument
1756 dev->power.runtime_status = RPM_SUSPENDED; in pm_runtime_init()
1757 dev->power.last_status = RPM_INVALID; in pm_runtime_init()
1758 dev->power.idle_notification = false; in pm_runtime_init()
1760 dev->power.disable_depth = 1; in pm_runtime_init()
1761 atomic_set(&dev->power.usage_count, 0); in pm_runtime_init()
1763 dev->power.runtime_error = 0; in pm_runtime_init()
1765 atomic_set(&dev->power.child_count, 0); in pm_runtime_init()
1766 pm_suspend_ignore_children(dev, false); in pm_runtime_init()
1767 dev->power.runtime_auto = true; in pm_runtime_init()
1769 dev->power.request_pending = false; in pm_runtime_init()
1770 dev->power.request = RPM_REQ_NONE; in pm_runtime_init()
1771 dev->power.deferred_resume = false; in pm_runtime_init()
1772 dev->power.needs_force_resume = 0; in pm_runtime_init()
1773 INIT_WORK(&dev->power.work, pm_runtime_work); in pm_runtime_init()
1775 dev->power.timer_expires = 0; in pm_runtime_init()
1776 hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); in pm_runtime_init()
1777 dev->power.suspend_timer.function = pm_suspend_timer_fn; in pm_runtime_init()
1779 init_waitqueue_head(&dev->power.wait_queue); in pm_runtime_init()
1786 void pm_runtime_reinit(struct device *dev) in pm_runtime_reinit() argument
1788 if (!pm_runtime_enabled(dev)) { in pm_runtime_reinit()
1789 if (dev->power.runtime_status == RPM_ACTIVE) in pm_runtime_reinit()
1790 pm_runtime_set_suspended(dev); in pm_runtime_reinit()
1791 if (dev->power.irq_safe) { in pm_runtime_reinit()
1792 spin_lock_irq(&dev->power.lock); in pm_runtime_reinit()
1793 dev->power.irq_safe = 0; in pm_runtime_reinit()
1794 spin_unlock_irq(&dev->power.lock); in pm_runtime_reinit()
1795 if (dev->parent) in pm_runtime_reinit()
1796 pm_runtime_put(dev->parent); in pm_runtime_reinit()
1803 dev->power.needs_force_resume = false; in pm_runtime_reinit()
1810 void pm_runtime_remove(struct device *dev) in pm_runtime_remove() argument
1812 __pm_runtime_disable(dev, false); in pm_runtime_remove()
1813 pm_runtime_reinit(dev); in pm_runtime_remove()
1820 void pm_runtime_get_suppliers(struct device *dev) in pm_runtime_get_suppliers() argument
1827 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in pm_runtime_get_suppliers()
1841 void pm_runtime_put_suppliers(struct device *dev) in pm_runtime_put_suppliers() argument
1848 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in pm_runtime_put_suppliers()
1858 void pm_runtime_new_link(struct device *dev) in pm_runtime_new_link() argument
1860 spin_lock_irq(&dev->power.lock); in pm_runtime_new_link()
1861 dev->power.links_count++; in pm_runtime_new_link()
1862 spin_unlock_irq(&dev->power.lock); in pm_runtime_new_link()
1865 static void pm_runtime_drop_link_count(struct device *dev) in pm_runtime_drop_link_count() argument
1867 spin_lock_irq(&dev->power.lock); in pm_runtime_drop_link_count()
1868 WARN_ON(dev->power.links_count == 0); in pm_runtime_drop_link_count()
1869 dev->power.links_count--; in pm_runtime_drop_link_count()
1870 spin_unlock_irq(&dev->power.lock); in pm_runtime_drop_link_count()
1891 bool pm_runtime_need_not_resume(struct device *dev) in pm_runtime_need_not_resume() argument
1893 return atomic_read(&dev->power.usage_count) <= 1 && in pm_runtime_need_not_resume()
1894 (atomic_read(&dev->power.child_count) == 0 || in pm_runtime_need_not_resume()
1895 dev->power.ignore_children); in pm_runtime_need_not_resume()
1919 int pm_runtime_force_suspend(struct device *dev) in pm_runtime_force_suspend() argument
1924 pm_runtime_disable(dev); in pm_runtime_force_suspend()
1925 if (pm_runtime_status_suspended(dev)) in pm_runtime_force_suspend()
1928 callback = RPM_GET_CALLBACK(dev, runtime_suspend); in pm_runtime_force_suspend()
1930 dev_pm_enable_wake_irq_check(dev, true); in pm_runtime_force_suspend()
1931 ret = callback ? callback(dev) : 0; in pm_runtime_force_suspend()
1935 dev_pm_enable_wake_irq_complete(dev); in pm_runtime_force_suspend()
1943 if (pm_runtime_need_not_resume(dev)) { in pm_runtime_force_suspend()
1944 pm_runtime_set_suspended(dev); in pm_runtime_force_suspend()
1946 __update_runtime_status(dev, RPM_SUSPENDED); in pm_runtime_force_suspend()
1947 dev->power.needs_force_resume = 1; in pm_runtime_force_suspend()
1953 dev_pm_disable_wake_irq_check(dev, true); in pm_runtime_force_suspend()
1954 pm_runtime_enable(dev); in pm_runtime_force_suspend()
1971 int pm_runtime_force_resume(struct device *dev) in pm_runtime_force_resume() argument
1976 if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume) in pm_runtime_force_resume()
1983 __update_runtime_status(dev, RPM_ACTIVE); in pm_runtime_force_resume()
1985 callback = RPM_GET_CALLBACK(dev, runtime_resume); in pm_runtime_force_resume()
1987 dev_pm_disable_wake_irq_check(dev, false); in pm_runtime_force_resume()
1988 ret = callback ? callback(dev) : 0; in pm_runtime_force_resume()
1990 pm_runtime_set_suspended(dev); in pm_runtime_force_resume()
1991 dev_pm_enable_wake_irq_check(dev, false); in pm_runtime_force_resume()
1995 pm_runtime_mark_last_busy(dev); in pm_runtime_force_resume()
1997 dev->power.needs_force_resume = 0; in pm_runtime_force_resume()
1998 pm_runtime_enable(dev); in pm_runtime_force_resume()
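A frequent use of this pair is to reuse the runtime PM callbacks for system-wide suspend and resume. A hedged sketch of the resulting ops table, with foo_runtime_suspend/foo_runtime_resume as in the earlier hypothetical sketches:

static const struct dev_pm_ops foo_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};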