Lines Matching refs:dev

21 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)  in __rpm_get_callback()  argument
26 if (dev->pm_domain) in __rpm_get_callback()
27 ops = &dev->pm_domain->ops; in __rpm_get_callback()
28 else if (dev->type && dev->type->pm) in __rpm_get_callback()
29 ops = dev->type->pm; in __rpm_get_callback()
30 else if (dev->class && dev->class->pm) in __rpm_get_callback()
31 ops = dev->class->pm; in __rpm_get_callback()
32 else if (dev->bus && dev->bus->pm) in __rpm_get_callback()
33 ops = dev->bus->pm; in __rpm_get_callback()
42 if (!cb && dev->driver && dev->driver->pm) in __rpm_get_callback()
43 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset); in __rpm_get_callback()
48 #define RPM_GET_CALLBACK(dev, callback) \ argument
49 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
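
__rpm_get_callback() resolves a runtime PM callback by checking dev->pm_domain, dev->type, dev->class and dev->bus in that order (lines 26-33) and only falls back to dev->driver->pm when none of those supply one (lines 42-43). A minimal, hypothetical driver-side sketch of where that fallback comes from; all foo_* names are made up for illustration:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int __maybe_unused foo_runtime_suspend(struct device *dev)
{
	/* put the hardware into its low-power state */
	return 0;
}

static int __maybe_unused foo_runtime_resume(struct device *dev)
{
	/* bring the hardware back to full power */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = &foo_pm_ops,	/* found via dev->driver->pm by RPM_GET_CALLBACK() */
	},
};
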
51 static int rpm_resume(struct device *dev, int rpmflags);
52 static int rpm_suspend(struct device *dev, int rpmflags);
65 static void update_pm_runtime_accounting(struct device *dev) in update_pm_runtime_accounting() argument
69 if (dev->power.disable_depth > 0) in update_pm_runtime_accounting()
72 last = dev->power.accounting_timestamp; in update_pm_runtime_accounting()
75 dev->power.accounting_timestamp = now; in update_pm_runtime_accounting()
87 if (dev->power.runtime_status == RPM_SUSPENDED) in update_pm_runtime_accounting()
88 dev->power.suspended_time += delta; in update_pm_runtime_accounting()
90 dev->power.active_time += delta; in update_pm_runtime_accounting()
93 static void __update_runtime_status(struct device *dev, enum rpm_status status) in __update_runtime_status() argument
95 update_pm_runtime_accounting(dev); in __update_runtime_status()
96 trace_rpm_status(dev, status); in __update_runtime_status()
97 dev->power.runtime_status = status; in __update_runtime_status()
100 static u64 rpm_get_accounted_time(struct device *dev, bool suspended) in rpm_get_accounted_time() argument
105 spin_lock_irqsave(&dev->power.lock, flags); in rpm_get_accounted_time()
107 update_pm_runtime_accounting(dev); in rpm_get_accounted_time()
108 time = suspended ? dev->power.suspended_time : dev->power.active_time; in rpm_get_accounted_time()
110 spin_unlock_irqrestore(&dev->power.lock, flags); in rpm_get_accounted_time()
115 u64 pm_runtime_active_time(struct device *dev) in pm_runtime_active_time() argument
117 return rpm_get_accounted_time(dev, false); in pm_runtime_active_time()
120 u64 pm_runtime_suspended_time(struct device *dev) in pm_runtime_suspended_time() argument
122 return rpm_get_accounted_time(dev, true); in pm_runtime_suspended_time()
130 static void pm_runtime_deactivate_timer(struct device *dev) in pm_runtime_deactivate_timer() argument
132 if (dev->power.timer_expires > 0) { in pm_runtime_deactivate_timer()
133 hrtimer_try_to_cancel(&dev->power.suspend_timer); in pm_runtime_deactivate_timer()
134 dev->power.timer_expires = 0; in pm_runtime_deactivate_timer()
142 static void pm_runtime_cancel_pending(struct device *dev) in pm_runtime_cancel_pending() argument
144 pm_runtime_deactivate_timer(dev); in pm_runtime_cancel_pending()
149 dev->power.request = RPM_REQ_NONE; in pm_runtime_cancel_pending()
164 u64 pm_runtime_autosuspend_expiration(struct device *dev) in pm_runtime_autosuspend_expiration() argument
169 if (!dev->power.use_autosuspend) in pm_runtime_autosuspend_expiration()
172 autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); in pm_runtime_autosuspend_expiration()
176 expires = READ_ONCE(dev->power.last_busy); in pm_runtime_autosuspend_expiration()
185 static int dev_memalloc_noio(struct device *dev, void *data) in dev_memalloc_noio() argument
187 return dev->power.memalloc_noio; in dev_memalloc_noio()
218 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable) in pm_runtime_set_memalloc_noio() argument
227 spin_lock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
228 enabled = dev->power.memalloc_noio; in pm_runtime_set_memalloc_noio()
229 dev->power.memalloc_noio = enable; in pm_runtime_set_memalloc_noio()
230 spin_unlock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
239 dev = dev->parent; in pm_runtime_set_memalloc_noio()
246 if (!dev || (!enable && in pm_runtime_set_memalloc_noio()
247 device_for_each_child(dev, NULL, in pm_runtime_set_memalloc_noio()
259 static int rpm_check_suspend_allowed(struct device *dev) in rpm_check_suspend_allowed() argument
263 if (dev->power.runtime_error) in rpm_check_suspend_allowed()
265 else if (dev->power.disable_depth > 0) in rpm_check_suspend_allowed()
267 else if (atomic_read(&dev->power.usage_count)) in rpm_check_suspend_allowed()
269 else if (!dev->power.ignore_children && in rpm_check_suspend_allowed()
270 atomic_read(&dev->power.child_count)) in rpm_check_suspend_allowed()
274 else if ((dev->power.deferred_resume in rpm_check_suspend_allowed()
275 && dev->power.runtime_status == RPM_SUSPENDING) in rpm_check_suspend_allowed()
276 || (dev->power.request_pending in rpm_check_suspend_allowed()
277 && dev->power.request == RPM_REQ_RESUME)) in rpm_check_suspend_allowed()
279 else if (__dev_pm_qos_resume_latency(dev) == 0) in rpm_check_suspend_allowed()
281 else if (dev->power.runtime_status == RPM_SUSPENDED) in rpm_check_suspend_allowed()
287 static int rpm_get_suppliers(struct device *dev) in rpm_get_suppliers() argument
291 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in rpm_get_suppliers()
330 static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend) in __rpm_put_suppliers() argument
334 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in __rpm_put_suppliers()
342 static void rpm_put_suppliers(struct device *dev) in rpm_put_suppliers() argument
344 __rpm_put_suppliers(dev, true); in rpm_put_suppliers()
347 static void rpm_suspend_suppliers(struct device *dev) in rpm_suspend_suppliers() argument
352 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in rpm_suspend_suppliers()
364 static int __rpm_callback(int (*cb)(struct device *), struct device *dev) in __rpm_callback() argument
365 __releases(&dev->power.lock) __acquires(&dev->power.lock) in __rpm_callback()
368 bool use_links = dev->power.links_count > 0; in __rpm_callback()
370 if (dev->power.irq_safe) { in __rpm_callback()
371 spin_unlock(&dev->power.lock); in __rpm_callback()
373 spin_unlock_irq(&dev->power.lock); in __rpm_callback()
382 if (use_links && dev->power.runtime_status == RPM_RESUMING) { in __rpm_callback()
385 retval = rpm_get_suppliers(dev); in __rpm_callback()
387 rpm_put_suppliers(dev); in __rpm_callback()
396 retval = cb(dev); in __rpm_callback()
398 if (dev->power.irq_safe) { in __rpm_callback()
399 spin_lock(&dev->power.lock); in __rpm_callback()
409 && ((dev->power.runtime_status == RPM_SUSPENDING && !retval) in __rpm_callback()
410 || (dev->power.runtime_status == RPM_RESUMING && retval))) { in __rpm_callback()
413 __rpm_put_suppliers(dev, false); in __rpm_callback()
419 spin_lock_irq(&dev->power.lock); in __rpm_callback()
438 static int rpm_idle(struct device *dev, int rpmflags) in rpm_idle() argument
443 trace_rpm_idle_rcuidle(dev, rpmflags); in rpm_idle()
444 retval = rpm_check_suspend_allowed(dev); in rpm_idle()
449 else if (dev->power.runtime_status != RPM_ACTIVE) in rpm_idle()
456 else if (dev->power.request_pending && in rpm_idle()
457 dev->power.request > RPM_REQ_IDLE) in rpm_idle()
461 else if (dev->power.idle_notification) in rpm_idle()
467 dev->power.request = RPM_REQ_NONE; in rpm_idle()
469 callback = RPM_GET_CALLBACK(dev, runtime_idle); in rpm_idle()
472 if (!callback || dev->power.no_callbacks) in rpm_idle()
477 dev->power.request = RPM_REQ_IDLE; in rpm_idle()
478 if (!dev->power.request_pending) { in rpm_idle()
479 dev->power.request_pending = true; in rpm_idle()
480 queue_work(pm_wq, &dev->power.work); in rpm_idle()
482 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0); in rpm_idle()
486 dev->power.idle_notification = true; in rpm_idle()
488 if (dev->power.irq_safe) in rpm_idle()
489 spin_unlock(&dev->power.lock); in rpm_idle()
491 spin_unlock_irq(&dev->power.lock); in rpm_idle()
493 retval = callback(dev); in rpm_idle()
495 if (dev->power.irq_safe) in rpm_idle()
496 spin_lock(&dev->power.lock); in rpm_idle()
498 spin_lock_irq(&dev->power.lock); in rpm_idle()
500 dev->power.idle_notification = false; in rpm_idle()
501 wake_up_all(&dev->power.wait_queue); in rpm_idle()
504 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); in rpm_idle()
505 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO); in rpm_idle()
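
rpm_idle() treats the ->runtime_idle() callback as a veto: if the callback returns 0, the core goes on to rpm_suspend(dev, rpmflags | RPM_AUTO) at line 505, while a nonzero return aborts the suspend attempt. A hedged sketch of such a callback; foo_hw_busy() is a hypothetical stand-in for real driver state:

#include <linux/pm_runtime.h>

/* Hypothetical "is the hardware still busy?" check. */
static bool foo_hw_busy(struct device *dev)
{
	return false;
}

static int foo_runtime_idle(struct device *dev)
{
	if (foo_hw_busy(dev))
		return -EBUSY;	/* veto: rpm_idle() returns without suspending */

	return 0;		/* let the core proceed to an (auto)suspend */
}
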
513 static int rpm_callback(int (*cb)(struct device *), struct device *dev) in rpm_callback() argument
517 if (dev->power.memalloc_noio) { in rpm_callback()
530 retval = __rpm_callback(cb, dev); in rpm_callback()
533 retval = __rpm_callback(cb, dev); in rpm_callback()
536 dev->power.runtime_error = retval; in rpm_callback()
561 static int rpm_suspend(struct device *dev, int rpmflags) in rpm_suspend() argument
562 __releases(&dev->power.lock) __acquires(&dev->power.lock) in rpm_suspend()
568 trace_rpm_suspend_rcuidle(dev, rpmflags); in rpm_suspend()
571 retval = rpm_check_suspend_allowed(dev); in rpm_suspend()
576 if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC)) in rpm_suspend()
583 && dev->power.runtime_status != RPM_SUSPENDING) { in rpm_suspend()
584 u64 expires = pm_runtime_autosuspend_expiration(dev); in rpm_suspend()
588 dev->power.request = RPM_REQ_NONE; in rpm_suspend()
597 if (!(dev->power.timer_expires && in rpm_suspend()
598 dev->power.timer_expires <= expires)) { in rpm_suspend()
603 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) * in rpm_suspend()
606 dev->power.timer_expires = expires; in rpm_suspend()
607 hrtimer_start_range_ns(&dev->power.suspend_timer, in rpm_suspend()
612 dev->power.timer_autosuspends = 1; in rpm_suspend()
618 pm_runtime_cancel_pending(dev); in rpm_suspend()
620 if (dev->power.runtime_status == RPM_SUSPENDING) { in rpm_suspend()
628 if (dev->power.irq_safe) { in rpm_suspend()
629 spin_unlock(&dev->power.lock); in rpm_suspend()
633 spin_lock(&dev->power.lock); in rpm_suspend()
639 prepare_to_wait(&dev->power.wait_queue, &wait, in rpm_suspend()
641 if (dev->power.runtime_status != RPM_SUSPENDING) in rpm_suspend()
644 spin_unlock_irq(&dev->power.lock); in rpm_suspend()
648 spin_lock_irq(&dev->power.lock); in rpm_suspend()
650 finish_wait(&dev->power.wait_queue, &wait); in rpm_suspend()
654 if (dev->power.no_callbacks) in rpm_suspend()
659 dev->power.request = (rpmflags & RPM_AUTO) ? in rpm_suspend()
661 if (!dev->power.request_pending) { in rpm_suspend()
662 dev->power.request_pending = true; in rpm_suspend()
663 queue_work(pm_wq, &dev->power.work); in rpm_suspend()
668 __update_runtime_status(dev, RPM_SUSPENDING); in rpm_suspend()
670 callback = RPM_GET_CALLBACK(dev, runtime_suspend); in rpm_suspend()
672 dev_pm_enable_wake_irq_check(dev, true); in rpm_suspend()
673 retval = rpm_callback(callback, dev); in rpm_suspend()
677 dev_pm_enable_wake_irq_complete(dev); in rpm_suspend()
680 __update_runtime_status(dev, RPM_SUSPENDED); in rpm_suspend()
681 pm_runtime_deactivate_timer(dev); in rpm_suspend()
683 if (dev->parent) { in rpm_suspend()
684 parent = dev->parent; in rpm_suspend()
687 wake_up_all(&dev->power.wait_queue); in rpm_suspend()
689 if (dev->power.deferred_resume) { in rpm_suspend()
690 dev->power.deferred_resume = false; in rpm_suspend()
691 rpm_resume(dev, 0); in rpm_suspend()
696 if (dev->power.irq_safe) in rpm_suspend()
701 spin_unlock(&dev->power.lock); in rpm_suspend()
707 spin_lock(&dev->power.lock); in rpm_suspend()
710 if (dev->power.links_count > 0) { in rpm_suspend()
711 spin_unlock_irq(&dev->power.lock); in rpm_suspend()
713 rpm_suspend_suppliers(dev); in rpm_suspend()
715 spin_lock_irq(&dev->power.lock); in rpm_suspend()
719 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); in rpm_suspend()
724 dev_pm_disable_wake_irq_check(dev, true); in rpm_suspend()
725 __update_runtime_status(dev, RPM_ACTIVE); in rpm_suspend()
726 dev->power.deferred_resume = false; in rpm_suspend()
727 wake_up_all(&dev->power.wait_queue); in rpm_suspend()
730 dev->power.runtime_error = 0; in rpm_suspend()
739 pm_runtime_autosuspend_expiration(dev) != 0) in rpm_suspend()
742 pm_runtime_cancel_pending(dev); in rpm_suspend()
764 static int rpm_resume(struct device *dev, int rpmflags) in rpm_resume() argument
765 __releases(&dev->power.lock) __acquires(&dev->power.lock) in rpm_resume()
771 trace_rpm_resume_rcuidle(dev, rpmflags); in rpm_resume()
774 if (dev->power.runtime_error) { in rpm_resume()
776 } else if (dev->power.disable_depth > 0) { in rpm_resume()
777 if (dev->power.runtime_status == RPM_ACTIVE && in rpm_resume()
778 dev->power.last_status == RPM_ACTIVE) in rpm_resume()
792 dev->power.request = RPM_REQ_NONE; in rpm_resume()
793 if (!dev->power.timer_autosuspends) in rpm_resume()
794 pm_runtime_deactivate_timer(dev); in rpm_resume()
796 if (dev->power.runtime_status == RPM_ACTIVE) { in rpm_resume()
801 if (dev->power.runtime_status == RPM_RESUMING in rpm_resume()
802 || dev->power.runtime_status == RPM_SUSPENDING) { in rpm_resume()
806 if (dev->power.runtime_status == RPM_SUSPENDING) { in rpm_resume()
807 dev->power.deferred_resume = true; in rpm_resume()
816 if (dev->power.irq_safe) { in rpm_resume()
817 spin_unlock(&dev->power.lock); in rpm_resume()
821 spin_lock(&dev->power.lock); in rpm_resume()
827 prepare_to_wait(&dev->power.wait_queue, &wait, in rpm_resume()
829 if (dev->power.runtime_status != RPM_RESUMING in rpm_resume()
830 && dev->power.runtime_status != RPM_SUSPENDING) in rpm_resume()
833 spin_unlock_irq(&dev->power.lock); in rpm_resume()
837 spin_lock_irq(&dev->power.lock); in rpm_resume()
839 finish_wait(&dev->power.wait_queue, &wait); in rpm_resume()
848 if (dev->power.no_callbacks && !parent && dev->parent) { in rpm_resume()
849 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING); in rpm_resume()
850 if (dev->parent->power.disable_depth > 0 in rpm_resume()
851 || dev->parent->power.ignore_children in rpm_resume()
852 || dev->parent->power.runtime_status == RPM_ACTIVE) { in rpm_resume()
853 atomic_inc(&dev->parent->power.child_count); in rpm_resume()
854 spin_unlock(&dev->parent->power.lock); in rpm_resume()
858 spin_unlock(&dev->parent->power.lock); in rpm_resume()
863 dev->power.request = RPM_REQ_RESUME; in rpm_resume()
864 if (!dev->power.request_pending) { in rpm_resume()
865 dev->power.request_pending = true; in rpm_resume()
866 queue_work(pm_wq, &dev->power.work); in rpm_resume()
872 if (!parent && dev->parent) { in rpm_resume()
878 parent = dev->parent; in rpm_resume()
879 if (dev->power.irq_safe) in rpm_resume()
881 spin_unlock(&dev->power.lock); in rpm_resume()
898 spin_lock(&dev->power.lock); in rpm_resume()
905 if (dev->power.no_callbacks) in rpm_resume()
908 __update_runtime_status(dev, RPM_RESUMING); in rpm_resume()
910 callback = RPM_GET_CALLBACK(dev, runtime_resume); in rpm_resume()
912 dev_pm_disable_wake_irq_check(dev, false); in rpm_resume()
913 retval = rpm_callback(callback, dev); in rpm_resume()
915 __update_runtime_status(dev, RPM_SUSPENDED); in rpm_resume()
916 pm_runtime_cancel_pending(dev); in rpm_resume()
917 dev_pm_enable_wake_irq_check(dev, false); in rpm_resume()
920 __update_runtime_status(dev, RPM_ACTIVE); in rpm_resume()
921 pm_runtime_mark_last_busy(dev); in rpm_resume()
925 wake_up_all(&dev->power.wait_queue); in rpm_resume()
928 rpm_idle(dev, RPM_ASYNC); in rpm_resume()
931 if (parent && !dev->power.irq_safe) { in rpm_resume()
932 spin_unlock_irq(&dev->power.lock); in rpm_resume()
936 spin_lock_irq(&dev->power.lock); in rpm_resume()
939 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); in rpm_resume()
953 struct device *dev = container_of(work, struct device, power.work); in pm_runtime_work() local
956 spin_lock_irq(&dev->power.lock); in pm_runtime_work()
958 if (!dev->power.request_pending) in pm_runtime_work()
961 req = dev->power.request; in pm_runtime_work()
962 dev->power.request = RPM_REQ_NONE; in pm_runtime_work()
963 dev->power.request_pending = false; in pm_runtime_work()
969 rpm_idle(dev, RPM_NOWAIT); in pm_runtime_work()
972 rpm_suspend(dev, RPM_NOWAIT); in pm_runtime_work()
975 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO); in pm_runtime_work()
978 rpm_resume(dev, RPM_NOWAIT); in pm_runtime_work()
983 spin_unlock_irq(&dev->power.lock); in pm_runtime_work()
994 struct device *dev = container_of(timer, struct device, power.suspend_timer); in pm_suspend_timer_fn() local
998 spin_lock_irqsave(&dev->power.lock, flags); in pm_suspend_timer_fn()
1000 expires = dev->power.timer_expires; in pm_suspend_timer_fn()
1006 dev->power.timer_expires = 0; in pm_suspend_timer_fn()
1007 rpm_suspend(dev, dev->power.timer_autosuspends ? in pm_suspend_timer_fn()
1011 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_suspend_timer_fn()
1021 int pm_schedule_suspend(struct device *dev, unsigned int delay) in pm_schedule_suspend() argument
1027 spin_lock_irqsave(&dev->power.lock, flags); in pm_schedule_suspend()
1030 retval = rpm_suspend(dev, RPM_ASYNC); in pm_schedule_suspend()
1034 retval = rpm_check_suspend_allowed(dev); in pm_schedule_suspend()
1039 pm_runtime_cancel_pending(dev); in pm_schedule_suspend()
1042 dev->power.timer_expires = expires; in pm_schedule_suspend()
1043 dev->power.timer_autosuspends = 0; in pm_schedule_suspend()
1044 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); in pm_schedule_suspend()
1047 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_schedule_suspend()
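
pm_schedule_suspend() arms dev->power.suspend_timer so that a suspend request is submitted 'delay' milliseconds later, with delay == 0 falling through to an immediate asynchronous rpm_suspend() at line 1030. A hedged usage sketch for a driver that wants a one-shot delayed suspend instead of autosuspend; foo_transfer_done() is hypothetical:

#include <linux/pm_runtime.h>

static void foo_transfer_done(struct device *dev)
{
	/* Drop the reference held for the transfer without triggering an
	 * immediate idle check, then ask for a suspend 500 ms from now. */
	pm_runtime_put_noidle(dev);
	pm_schedule_suspend(dev, 500);
}
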
1053 static int rpm_drop_usage_count(struct device *dev) in rpm_drop_usage_count() argument
1057 ret = atomic_sub_return(1, &dev->power.usage_count); in rpm_drop_usage_count()
1067 atomic_inc(&dev->power.usage_count); in rpm_drop_usage_count()
1068 dev_warn(dev, "Runtime PM usage count underflow!\n"); in rpm_drop_usage_count()
1085 int __pm_runtime_idle(struct device *dev, int rpmflags) in __pm_runtime_idle() argument
1091 retval = rpm_drop_usage_count(dev); in __pm_runtime_idle()
1095 trace_rpm_usage_rcuidle(dev, rpmflags); in __pm_runtime_idle()
1100 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_idle()
1102 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_idle()
1103 retval = rpm_idle(dev, rpmflags); in __pm_runtime_idle()
1104 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_idle()
1123 int __pm_runtime_suspend(struct device *dev, int rpmflags) in __pm_runtime_suspend() argument
1129 retval = rpm_drop_usage_count(dev); in __pm_runtime_suspend()
1133 trace_rpm_usage_rcuidle(dev, rpmflags); in __pm_runtime_suspend()
1138 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_suspend()
1140 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_suspend()
1141 retval = rpm_suspend(dev, rpmflags); in __pm_runtime_suspend()
1142 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_suspend()
1159 int __pm_runtime_resume(struct device *dev, int rpmflags) in __pm_runtime_resume() argument
1164 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe && in __pm_runtime_resume()
1165 dev->power.runtime_status != RPM_ACTIVE); in __pm_runtime_resume()
1168 atomic_inc(&dev->power.usage_count); in __pm_runtime_resume()
1170 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_resume()
1171 retval = rpm_resume(dev, rpmflags); in __pm_runtime_resume()
1172 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_resume()
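
__pm_runtime_resume() is the backend of pm_runtime_get_sync() and pm_runtime_resume_and_get(): with RPM_GET_PUT set it increments the usage count (line 1168) before resuming. The usual pattern around hardware access, sketched with a hypothetical foo_do_io():

#include <linux/pm_runtime.h>

static int foo_do_io(struct device *dev)
{
	int ret;

	/* Take a usage-count reference and resume synchronously; on failure
	 * the helper drops the reference again before returning. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... access the now-active hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* drop the reference, allow autosuspend */
	return 0;
}
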
1200 int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count) in pm_runtime_get_if_active() argument
1205 spin_lock_irqsave(&dev->power.lock, flags); in pm_runtime_get_if_active()
1206 if (dev->power.disable_depth > 0) { in pm_runtime_get_if_active()
1208 } else if (dev->power.runtime_status != RPM_ACTIVE) { in pm_runtime_get_if_active()
1212 atomic_inc(&dev->power.usage_count); in pm_runtime_get_if_active()
1214 retval = atomic_inc_not_zero(&dev->power.usage_count); in pm_runtime_get_if_active()
1216 trace_rpm_usage_rcuidle(dev, 0); in pm_runtime_get_if_active()
1217 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_runtime_get_if_active()
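
In this version pm_runtime_get_if_active(dev, ign_usage_count) takes a reference only if the device is currently RPM_ACTIVE; with ign_usage_count == false it additionally requires the usage count to already be nonzero. It returns a positive value on success, 0 when the device is not active, and an error when runtime PM is disabled. A hedged sketch; foo_update_stats() is hypothetical:

#include <linux/pm_runtime.h>

static void foo_update_stats(struct device *dev)
{
	/* Only read hardware counters if the device is already powered;
	 * never resume it just for statistics. */
	if (pm_runtime_get_if_active(dev, true) <= 0)
		return;

	/* ... read counters from the active device ... */

	pm_runtime_put(dev);	/* drop the reference taken above */
}
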
1247 int __pm_runtime_set_status(struct device *dev, unsigned int status) in __pm_runtime_set_status() argument
1249 struct device *parent = dev->parent; in __pm_runtime_set_status()
1257 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_set_status()
1263 if (dev->power.runtime_error || dev->power.disable_depth) in __pm_runtime_set_status()
1264 dev->power.disable_depth++; in __pm_runtime_set_status()
1268 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_set_status()
1282 error = rpm_get_suppliers(dev); in __pm_runtime_set_status()
1289 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_set_status()
1291 if (dev->power.runtime_status == status || !parent) in __pm_runtime_set_status()
1308 dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n", in __pm_runtime_set_status()
1309 dev_name(dev), in __pm_runtime_set_status()
1312 } else if (dev->power.runtime_status == RPM_SUSPENDED) { in __pm_runtime_set_status()
1325 __update_runtime_status(dev, status); in __pm_runtime_set_status()
1327 dev->power.runtime_error = 0; in __pm_runtime_set_status()
1330 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_set_status()
1338 rpm_put_suppliers(dev); in __pm_runtime_set_status()
1343 pm_runtime_enable(dev); in __pm_runtime_set_status()
1358 static void __pm_runtime_barrier(struct device *dev) in __pm_runtime_barrier() argument
1360 pm_runtime_deactivate_timer(dev); in __pm_runtime_barrier()
1362 if (dev->power.request_pending) { in __pm_runtime_barrier()
1363 dev->power.request = RPM_REQ_NONE; in __pm_runtime_barrier()
1364 spin_unlock_irq(&dev->power.lock); in __pm_runtime_barrier()
1366 cancel_work_sync(&dev->power.work); in __pm_runtime_barrier()
1368 spin_lock_irq(&dev->power.lock); in __pm_runtime_barrier()
1369 dev->power.request_pending = false; in __pm_runtime_barrier()
1372 if (dev->power.runtime_status == RPM_SUSPENDING in __pm_runtime_barrier()
1373 || dev->power.runtime_status == RPM_RESUMING in __pm_runtime_barrier()
1374 || dev->power.idle_notification) { in __pm_runtime_barrier()
1379 prepare_to_wait(&dev->power.wait_queue, &wait, in __pm_runtime_barrier()
1381 if (dev->power.runtime_status != RPM_SUSPENDING in __pm_runtime_barrier()
1382 && dev->power.runtime_status != RPM_RESUMING in __pm_runtime_barrier()
1383 && !dev->power.idle_notification) in __pm_runtime_barrier()
1385 spin_unlock_irq(&dev->power.lock); in __pm_runtime_barrier()
1389 spin_lock_irq(&dev->power.lock); in __pm_runtime_barrier()
1391 finish_wait(&dev->power.wait_queue, &wait); in __pm_runtime_barrier()
1409 int pm_runtime_barrier(struct device *dev) in pm_runtime_barrier() argument
1413 pm_runtime_get_noresume(dev); in pm_runtime_barrier()
1414 spin_lock_irq(&dev->power.lock); in pm_runtime_barrier()
1416 if (dev->power.request_pending in pm_runtime_barrier()
1417 && dev->power.request == RPM_REQ_RESUME) { in pm_runtime_barrier()
1418 rpm_resume(dev, 0); in pm_runtime_barrier()
1422 __pm_runtime_barrier(dev); in pm_runtime_barrier()
1424 spin_unlock_irq(&dev->power.lock); in pm_runtime_barrier()
1425 pm_runtime_put_noidle(dev); in pm_runtime_barrier()
1445 void __pm_runtime_disable(struct device *dev, bool check_resume) in __pm_runtime_disable() argument
1447 spin_lock_irq(&dev->power.lock); in __pm_runtime_disable()
1449 if (dev->power.disable_depth > 0) { in __pm_runtime_disable()
1450 dev->power.disable_depth++; in __pm_runtime_disable()
1459 if (check_resume && dev->power.request_pending in __pm_runtime_disable()
1460 && dev->power.request == RPM_REQ_RESUME) { in __pm_runtime_disable()
1465 pm_runtime_get_noresume(dev); in __pm_runtime_disable()
1467 rpm_resume(dev, 0); in __pm_runtime_disable()
1469 pm_runtime_put_noidle(dev); in __pm_runtime_disable()
1473 update_pm_runtime_accounting(dev); in __pm_runtime_disable()
1475 if (!dev->power.disable_depth++) { in __pm_runtime_disable()
1476 __pm_runtime_barrier(dev); in __pm_runtime_disable()
1477 dev->power.last_status = dev->power.runtime_status; in __pm_runtime_disable()
1481 spin_unlock_irq(&dev->power.lock); in __pm_runtime_disable()
1489 void pm_runtime_enable(struct device *dev) in pm_runtime_enable() argument
1493 spin_lock_irqsave(&dev->power.lock, flags); in pm_runtime_enable()
1495 if (!dev->power.disable_depth) { in pm_runtime_enable()
1496 dev_warn(dev, "Unbalanced %s!\n", __func__); in pm_runtime_enable()
1500 if (--dev->power.disable_depth > 0) in pm_runtime_enable()
1503 dev->power.last_status = RPM_INVALID; in pm_runtime_enable()
1504 dev->power.accounting_timestamp = ktime_get_mono_fast_ns(); in pm_runtime_enable()
1506 if (dev->power.runtime_status == RPM_SUSPENDED && in pm_runtime_enable()
1507 !dev->power.ignore_children && in pm_runtime_enable()
1508 atomic_read(&dev->power.child_count) > 0) in pm_runtime_enable()
1509 dev_warn(dev, "Enabling runtime PM for inactive device with active children\n"); in pm_runtime_enable()
1512 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_runtime_enable()
1530 int devm_pm_runtime_enable(struct device *dev) in devm_pm_runtime_enable() argument
1532 pm_runtime_enable(dev); in devm_pm_runtime_enable()
1534 return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev); in devm_pm_runtime_enable()
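
pm_runtime_init() (lines 1714-1737 below) starts every device out as RPM_SUSPENDED with disable_depth == 1, so a driver has to establish the initial status and enable runtime PM itself, typically in probe. A hedged probe fragment using the devres variant above; foo_probe() is hypothetical:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	/* The hardware is powered after probe setup, so record that
	 * before enabling runtime PM. */
	pm_runtime_set_active(dev);

	/* Enable runtime PM; the matching disable runs automatically
	 * when the driver is unbound. */
	ret = devm_pm_runtime_enable(dev);
	if (ret)
		return ret;

	return 0;
}
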
1546 void pm_runtime_forbid(struct device *dev) in pm_runtime_forbid() argument
1548 spin_lock_irq(&dev->power.lock); in pm_runtime_forbid()
1549 if (!dev->power.runtime_auto) in pm_runtime_forbid()
1552 dev->power.runtime_auto = false; in pm_runtime_forbid()
1553 atomic_inc(&dev->power.usage_count); in pm_runtime_forbid()
1554 rpm_resume(dev, 0); in pm_runtime_forbid()
1557 spin_unlock_irq(&dev->power.lock); in pm_runtime_forbid()
1567 void pm_runtime_allow(struct device *dev) in pm_runtime_allow() argument
1571 spin_lock_irq(&dev->power.lock); in pm_runtime_allow()
1572 if (dev->power.runtime_auto) in pm_runtime_allow()
1575 dev->power.runtime_auto = true; in pm_runtime_allow()
1576 ret = rpm_drop_usage_count(dev); in pm_runtime_allow()
1578 rpm_idle(dev, RPM_AUTO | RPM_ASYNC); in pm_runtime_allow()
1580 trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC); in pm_runtime_allow()
1583 spin_unlock_irq(&dev->power.lock); in pm_runtime_allow()
1595 void pm_runtime_no_callbacks(struct device *dev) in pm_runtime_no_callbacks() argument
1597 spin_lock_irq(&dev->power.lock); in pm_runtime_no_callbacks()
1598 dev->power.no_callbacks = 1; in pm_runtime_no_callbacks()
1599 spin_unlock_irq(&dev->power.lock); in pm_runtime_no_callbacks()
1600 if (device_is_registered(dev)) in pm_runtime_no_callbacks()
1601 rpm_sysfs_remove(dev); in pm_runtime_no_callbacks()
1616 void pm_runtime_irq_safe(struct device *dev) in pm_runtime_irq_safe() argument
1618 if (dev->parent) in pm_runtime_irq_safe()
1619 pm_runtime_get_sync(dev->parent); in pm_runtime_irq_safe()
1620 spin_lock_irq(&dev->power.lock); in pm_runtime_irq_safe()
1621 dev->power.irq_safe = 1; in pm_runtime_irq_safe()
1622 spin_unlock_irq(&dev->power.lock); in pm_runtime_irq_safe()
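
pm_runtime_irq_safe() permanently marks the device so that the suspend/resume paths take dev->power.lock with spin_lock() instead of spin_lock_irq() and never re-enable interrupts or sleep around the callbacks (see the irq_safe branches at lines 370-373, 628-633 and 816-821); it also pins the parent active via pm_runtime_get_sync(dev->parent). That is what allows synchronous runtime PM calls from atomic context. A hedged sketch with a hypothetical interrupt handler; the runtime callbacks of such a device must not sleep:

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

static irqreturn_t foo_irq(int irq, void *data)
{
	struct device *dev = data;

	pm_runtime_get_sync(dev);	/* safe in atomic context once irq_safe is set */

	/* ... handle the interrupt; the device is RPM_ACTIVE here ... */

	pm_runtime_put(dev);
	return IRQ_HANDLED;
}
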
1637 static void update_autosuspend(struct device *dev, int old_delay, int old_use) in update_autosuspend() argument
1639 int delay = dev->power.autosuspend_delay; in update_autosuspend()
1642 if (dev->power.use_autosuspend && delay < 0) { in update_autosuspend()
1646 atomic_inc(&dev->power.usage_count); in update_autosuspend()
1647 rpm_resume(dev, 0); in update_autosuspend()
1649 trace_rpm_usage_rcuidle(dev, 0); in update_autosuspend()
1658 atomic_dec(&dev->power.usage_count); in update_autosuspend()
1661 rpm_idle(dev, RPM_AUTO); in update_autosuspend()
1674 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) in pm_runtime_set_autosuspend_delay() argument
1678 spin_lock_irq(&dev->power.lock); in pm_runtime_set_autosuspend_delay()
1679 old_delay = dev->power.autosuspend_delay; in pm_runtime_set_autosuspend_delay()
1680 old_use = dev->power.use_autosuspend; in pm_runtime_set_autosuspend_delay()
1681 dev->power.autosuspend_delay = delay; in pm_runtime_set_autosuspend_delay()
1682 update_autosuspend(dev, old_delay, old_use); in pm_runtime_set_autosuspend_delay()
1683 spin_unlock_irq(&dev->power.lock); in pm_runtime_set_autosuspend_delay()
1695 void __pm_runtime_use_autosuspend(struct device *dev, bool use) in __pm_runtime_use_autosuspend() argument
1699 spin_lock_irq(&dev->power.lock); in __pm_runtime_use_autosuspend()
1700 old_delay = dev->power.autosuspend_delay; in __pm_runtime_use_autosuspend()
1701 old_use = dev->power.use_autosuspend; in __pm_runtime_use_autosuspend()
1702 dev->power.use_autosuspend = use; in __pm_runtime_use_autosuspend()
1703 update_autosuspend(dev, old_delay, old_use); in __pm_runtime_use_autosuspend()
1704 spin_unlock_irq(&dev->power.lock); in __pm_runtime_use_autosuspend()
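
Both setters feed update_autosuspend(): a negative delay while autosuspend is in use effectively blocks suspends (the usage count is bumped at line 1646), whereas enabling autosuspend with a sensible delay lets the idle path defer suspends. A typical, hedged setup helper; foo_setup_autosuspend() is hypothetical:

#include <linux/pm_runtime.h>

static void foo_setup_autosuspend(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* 2 s of inactivity */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}

I/O paths then pair pm_runtime_mark_last_busy() with pm_runtime_put_autosuspend(), as in the earlier foo_do_io() sketch, so the timer armed in rpm_suspend() (lines 597-612) fires only after the device has really been idle for the configured delay.
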
1712 void pm_runtime_init(struct device *dev) in pm_runtime_init() argument
1714 dev->power.runtime_status = RPM_SUSPENDED; in pm_runtime_init()
1715 dev->power.last_status = RPM_INVALID; in pm_runtime_init()
1716 dev->power.idle_notification = false; in pm_runtime_init()
1718 dev->power.disable_depth = 1; in pm_runtime_init()
1719 atomic_set(&dev->power.usage_count, 0); in pm_runtime_init()
1721 dev->power.runtime_error = 0; in pm_runtime_init()
1723 atomic_set(&dev->power.child_count, 0); in pm_runtime_init()
1724 pm_suspend_ignore_children(dev, false); in pm_runtime_init()
1725 dev->power.runtime_auto = true; in pm_runtime_init()
1727 dev->power.request_pending = false; in pm_runtime_init()
1728 dev->power.request = RPM_REQ_NONE; in pm_runtime_init()
1729 dev->power.deferred_resume = false; in pm_runtime_init()
1730 dev->power.needs_force_resume = 0; in pm_runtime_init()
1731 INIT_WORK(&dev->power.work, pm_runtime_work); in pm_runtime_init()
1733 dev->power.timer_expires = 0; in pm_runtime_init()
1734 hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); in pm_runtime_init()
1735 dev->power.suspend_timer.function = pm_suspend_timer_fn; in pm_runtime_init()
1737 init_waitqueue_head(&dev->power.wait_queue); in pm_runtime_init()
1744 void pm_runtime_reinit(struct device *dev) in pm_runtime_reinit() argument
1746 if (!pm_runtime_enabled(dev)) { in pm_runtime_reinit()
1747 if (dev->power.runtime_status == RPM_ACTIVE) in pm_runtime_reinit()
1748 pm_runtime_set_suspended(dev); in pm_runtime_reinit()
1749 if (dev->power.irq_safe) { in pm_runtime_reinit()
1750 spin_lock_irq(&dev->power.lock); in pm_runtime_reinit()
1751 dev->power.irq_safe = 0; in pm_runtime_reinit()
1752 spin_unlock_irq(&dev->power.lock); in pm_runtime_reinit()
1753 if (dev->parent) in pm_runtime_reinit()
1754 pm_runtime_put(dev->parent); in pm_runtime_reinit()
1763 void pm_runtime_remove(struct device *dev) in pm_runtime_remove() argument
1765 __pm_runtime_disable(dev, false); in pm_runtime_remove()
1766 pm_runtime_reinit(dev); in pm_runtime_remove()
1773 void pm_runtime_get_suppliers(struct device *dev) in pm_runtime_get_suppliers() argument
1780 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in pm_runtime_get_suppliers()
1794 void pm_runtime_put_suppliers(struct device *dev) in pm_runtime_put_suppliers() argument
1801 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in pm_runtime_put_suppliers()
1811 void pm_runtime_new_link(struct device *dev) in pm_runtime_new_link() argument
1813 spin_lock_irq(&dev->power.lock); in pm_runtime_new_link()
1814 dev->power.links_count++; in pm_runtime_new_link()
1815 spin_unlock_irq(&dev->power.lock); in pm_runtime_new_link()
1818 static void pm_runtime_drop_link_count(struct device *dev) in pm_runtime_drop_link_count() argument
1820 spin_lock_irq(&dev->power.lock); in pm_runtime_drop_link_count()
1821 WARN_ON(dev->power.links_count == 0); in pm_runtime_drop_link_count()
1822 dev->power.links_count--; in pm_runtime_drop_link_count()
1823 spin_unlock_irq(&dev->power.lock); in pm_runtime_drop_link_count()
1844 static bool pm_runtime_need_not_resume(struct device *dev) in pm_runtime_need_not_resume() argument
1846 return atomic_read(&dev->power.usage_count) <= 1 && in pm_runtime_need_not_resume()
1847 (atomic_read(&dev->power.child_count) == 0 || in pm_runtime_need_not_resume()
1848 dev->power.ignore_children); in pm_runtime_need_not_resume()
1868 int pm_runtime_force_suspend(struct device *dev) in pm_runtime_force_suspend() argument
1873 pm_runtime_disable(dev); in pm_runtime_force_suspend()
1874 if (pm_runtime_status_suspended(dev)) in pm_runtime_force_suspend()
1877 callback = RPM_GET_CALLBACK(dev, runtime_suspend); in pm_runtime_force_suspend()
1879 dev_pm_enable_wake_irq_check(dev, true); in pm_runtime_force_suspend()
1880 ret = callback ? callback(dev) : 0; in pm_runtime_force_suspend()
1884 dev_pm_enable_wake_irq_complete(dev); in pm_runtime_force_suspend()
1892 if (pm_runtime_need_not_resume(dev)) { in pm_runtime_force_suspend()
1893 pm_runtime_set_suspended(dev); in pm_runtime_force_suspend()
1895 __update_runtime_status(dev, RPM_SUSPENDED); in pm_runtime_force_suspend()
1896 dev->power.needs_force_resume = 1; in pm_runtime_force_suspend()
1902 dev_pm_disable_wake_irq_check(dev, true); in pm_runtime_force_suspend()
1903 pm_runtime_enable(dev); in pm_runtime_force_suspend()
1920 int pm_runtime_force_resume(struct device *dev) in pm_runtime_force_resume() argument
1925 if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume) in pm_runtime_force_resume()
1932 __update_runtime_status(dev, RPM_ACTIVE); in pm_runtime_force_resume()
1934 callback = RPM_GET_CALLBACK(dev, runtime_resume); in pm_runtime_force_resume()
1936 dev_pm_disable_wake_irq_check(dev, false); in pm_runtime_force_resume()
1937 ret = callback ? callback(dev) : 0; in pm_runtime_force_resume()
1939 pm_runtime_set_suspended(dev); in pm_runtime_force_resume()
1940 dev_pm_enable_wake_irq_check(dev, false); in pm_runtime_force_resume()
1944 pm_runtime_mark_last_busy(dev); in pm_runtime_force_resume()
1946 dev->power.needs_force_resume = 0; in pm_runtime_force_resume()
1947 pm_runtime_enable(dev); in pm_runtime_force_resume()
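
pm_runtime_force_suspend() and pm_runtime_force_resume() let drivers reuse their runtime PM callbacks for system-wide sleep: the former disables runtime PM and invokes ->runtime_suspend() directly, the latter undoes that only when needs_force_resume was set. A hedged example of wiring them into dev_pm_ops, extending the hypothetical foo_runtime_suspend()/foo_runtime_resume() callbacks from the first sketch above:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};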