/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * pm_runtime.h - Device run-time power management helper functions.
 *
 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>
 */

#ifndef _LINUX_PM_RUNTIME_H
#define _LINUX_PM_RUNTIME_H

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm.h>

#include <linux/jiffies.h>

/* Runtime PM flag argument bits */
#define RPM_ASYNC		0x01	/* Request is asynchronous */
#define RPM_NOWAIT		0x02	/* Don't wait for concurrent
					   state change */
#define RPM_GET_PUT		0x04	/* Increment/decrement the
					   usage_count */
#define RPM_AUTO		0x08	/* Use autosuspend_delay */

#ifdef CONFIG_PM
extern struct workqueue_struct *pm_wq;

static inline bool queue_pm_work(struct work_struct *work)
{
	return queue_work(pm_wq, work);
}

extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
extern int pm_runtime_force_suspend(struct device *dev);
extern int pm_runtime_force_resume(struct device *dev);

extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
extern void pm_runtime_enable(struct device *dev);
extern void __pm_runtime_disable(struct device *dev, bool check_resume);
extern void pm_runtime_allow(struct device *dev);
extern void pm_runtime_forbid(struct device *dev);
extern void pm_runtime_no_callbacks(struct device *dev);
extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
						 s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device_link *link);
extern void pm_runtime_release_supplier(struct device_link *link);

extern int devm_pm_runtime_enable(struct device *dev);

/**
 * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
 * @dev: Target device.
 *
 * Increment the runtime PM usage counter of @dev if its runtime PM status is
 * %RPM_ACTIVE and its runtime PM usage counter is greater than 0.
 */
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
	return pm_runtime_get_if_active(dev, false);
}
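
/*
 * Usage sketch (illustrative only, not part of this header): the conditional
 * helper above lets a driver touch the hardware only when the device is
 * already %RPM_ACTIVE and in use, e.g. from a timer or completion path.
 * The foo_hw_kick() helper below is hypothetical:
 *
 *	if (pm_runtime_get_if_in_use(dev) <= 0)
 *		return;
 *	foo_hw_kick(dev);
 *	pm_runtime_put(dev);
 *
 * A return value of 0 or less means the usage counter was not incremented,
 * so no matching "put" is needed in that case.
 */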

/**
 * pm_suspend_ignore_children - Set runtime PM behavior regarding children.
 * @dev: Target device.
 * @enable: Whether or not to ignore possible dependencies on children.
 *
 * The dependencies of @dev on its children will not be taken into account by
 * the runtime PM framework going forward if @enable is %true, or they will
 * be taken into account otherwise.
 */
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
	dev->power.ignore_children = enable;
}

/**
 * pm_runtime_get_noresume - Bump up runtime PM usage counter of a device.
 * @dev: Target device.
 */
static inline void pm_runtime_get_noresume(struct device *dev)
{
	atomic_inc(&dev->power.usage_count);
}

/**
 * pm_runtime_put_noidle - Drop runtime PM usage counter of a device.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev unless it is 0 already.
 */
static inline void pm_runtime_put_noidle(struct device *dev)
{
	atomic_add_unless(&dev->power.usage_count, -1, 0);
}

/**
 * pm_runtime_suspended - Check whether or not a device is runtime-suspended.
 * @dev: Target device.
 *
 * Return %true if runtime PM is enabled for @dev and its runtime PM status is
 * %RPM_SUSPENDED, or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
 * status cannot change.
 */
static inline bool pm_runtime_suspended(struct device *dev)
{
	return dev->power.runtime_status == RPM_SUSPENDED
		&& !dev->power.disable_depth;
}

/**
 * pm_runtime_active - Check whether or not a device is runtime-active.
 * @dev: Target device.
 *
 * Return %true if runtime PM is disabled for @dev or its runtime PM status is
 * %RPM_ACTIVE, or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
 * status cannot change.
 */
static inline bool pm_runtime_active(struct device *dev)
{
	return dev->power.runtime_status == RPM_ACTIVE
		|| dev->power.disable_depth;
}

/**
 * pm_runtime_status_suspended - Check if runtime PM status is "suspended".
 * @dev: Target device.
 *
 * Return %true if the runtime PM status of @dev is %RPM_SUSPENDED, or %false
 * otherwise, regardless of whether or not runtime PM has been enabled for @dev.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which the
 * runtime PM status of @dev cannot change.
 */
static inline bool pm_runtime_status_suspended(struct device *dev)
{
	return dev->power.runtime_status == RPM_SUSPENDED;
}

/**
 * pm_runtime_enabled - Check if runtime PM is enabled.
 * @dev: Target device.
 *
 * Return %true if runtime PM is enabled for @dev or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev.
 */
static inline bool pm_runtime_enabled(struct device *dev)
{
	return !dev->power.disable_depth;
}

/**
 * pm_runtime_has_no_callbacks - Check if runtime PM callbacks may be present.
 * @dev: Target device.
 *
 * Return %true if @dev is a special device without runtime PM callbacks or
 * %false otherwise.
 */
static inline bool pm_runtime_has_no_callbacks(struct device *dev)
{
	return dev->power.no_callbacks;
}

/**
 * pm_runtime_mark_last_busy - Update the last access time of a device.
 * @dev: Target device.
 *
 * Update the last access time of @dev used by the runtime PM autosuspend
 * mechanism to the current time as returned by ktime_get_mono_fast_ns().
 */
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
	WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
}

/**
 * pm_runtime_is_irq_safe - Check if runtime PM can work in interrupt context.
 * @dev: Target device.
 *
 * Return %true if @dev has been marked as an "IRQ-safe" device (with respect
 * to runtime PM), in which case its runtime PM callbacks can be expected to
 * work correctly when invoked from interrupt handlers.
 */
static inline bool pm_runtime_is_irq_safe(struct device *dev)
{
	return dev->power.irq_safe;
}

extern u64 pm_runtime_suspended_time(struct device *dev);

#else /* !CONFIG_PM */

static inline bool queue_pm_work(struct work_struct *work) { return false; }

static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
static inline int pm_runtime_force_resume(struct device *dev) { return 0; }

static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	return -ENOSYS;
}
static inline int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	return -ENOSYS;
}
static inline int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	return 1;
}
static inline int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	return -ENOSYS;
}
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
	return -EINVAL;
}
static inline int pm_runtime_get_if_active(struct device *dev,
					   bool ign_usage_count)
{
	return -EINVAL;
}
static inline int __pm_runtime_set_status(struct device *dev,
					  unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
static inline void pm_runtime_enable(struct device *dev) {}
static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}

static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }

static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_active(struct device *dev) { return true; }
static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }

static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}
static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }

static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; }
static inline void pm_runtime_mark_last_busy(struct device *dev) {}
static inline void __pm_runtime_use_autosuspend(struct device *dev,
						bool use) {}
static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
						    int delay) {}
static inline u64 pm_runtime_autosuspend_expiration(
				struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
						bool enable) {}
static inline void pm_runtime_get_suppliers(struct device *dev) {}
static inline void pm_runtime_put_suppliers(struct device *dev) {}
static inline void pm_runtime_new_link(struct device *dev) {}
static inline void pm_runtime_drop_link(struct device_link *link) {}
static inline void pm_runtime_release_supplier(struct device_link *link) {}

#endif /* !CONFIG_PM */

/**
 * pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it.
 * @dev: Target device.
 *
 * Invoke the "idle check" callback of @dev and, depending on its return value,
 * set up autosuspend of @dev or suspend it (depending on whether or not
 * autosuspend has been enabled for it).
 */
static inline int pm_runtime_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, 0);
}

/**
 * pm_runtime_suspend - Suspend a device synchronously.
 * @dev: Target device.
 */
static inline int pm_runtime_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, 0);
}

/**
 * pm_runtime_autosuspend - Set up autosuspend of a device or suspend it.
 * @dev: Target device.
 *
 * Set up autosuspend of @dev or suspend it (depending on whether or not
 * autosuspend is enabled for it) without engaging its "idle check" callback.
 */
static inline int pm_runtime_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_AUTO);
}

/**
 * pm_runtime_resume - Resume a device synchronously.
 * @dev: Target device.
 */
static inline int pm_runtime_resume(struct device *dev)
{
	return __pm_runtime_resume(dev, 0);
}

/**
 * pm_request_idle - Queue up "idle check" execution for a device.
 * @dev: Target device.
 *
 * Queue up a work item to run an equivalent of pm_runtime_idle() for @dev
 * asynchronously.
 */
static inline int pm_request_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_ASYNC);
}

/**
 * pm_request_resume - Queue up runtime-resume of a device.
 * @dev: Target device.
 */
static inline int pm_request_resume(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_ASYNC);
}

/**
 * pm_request_autosuspend - Queue up autosuspend of a device.
 * @dev: Target device.
 *
 * Queue up a work item to run an equivalent of pm_runtime_autosuspend() for
 * @dev asynchronously.
 */
static inline int pm_request_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
}

/**
 * pm_runtime_get - Bump up usage counter and queue up resume of a device.
 * @dev: Target device.
 *
 * Bump up the runtime PM usage counter of @dev and queue up a work item to
 * carry out runtime-resume of it.
 */
static inline int pm_runtime_get(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
}

/**
 * pm_runtime_get_sync - Bump up usage counter of a device and resume it.
 * @dev: Target device.
 *
 * Bump up the runtime PM usage counter of @dev and carry out runtime-resume of
 * it synchronously.
 *
 * The possible return values of this function are the same as for
 * pm_runtime_resume() and the runtime PM usage counter of @dev remains
 * incremented in all cases, even if it returns an error code.
 * Consider using pm_runtime_resume_and_get() instead of it, especially
 * if its return value is checked by the caller, as this is likely to result
 * in cleaner code.
 */
static inline int pm_runtime_get_sync(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_GET_PUT);
}

/**
 * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it.
 * @dev: Target device.
 *
 * Resume @dev synchronously and if that is successful, increment its runtime
 * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been
 * incremented or a negative error code otherwise.
 */
static inline int pm_runtime_resume_and_get(struct device *dev)
{
	int ret;

	ret = __pm_runtime_resume(dev, RPM_GET_PUT);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	return 0;
}
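
/*
 * Usage sketch (illustrative only): the kernel-doc for pm_runtime_get_sync()
 * recommends pm_runtime_resume_and_get() whenever the return value is checked
 * by the caller, because the usage counter is already balanced on failure.
 * The foo_do_io() transfer routine below is hypothetical:
 *
 *	ret = pm_runtime_resume_and_get(dev);
 *	if (ret)
 *		return ret;
 *	ret = foo_do_io(dev);
 *	pm_runtime_put(dev);
 *	return ret;
 */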

/**
 * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, queue up a work item for @dev like in pm_request_idle().
 */
static inline int pm_runtime_put(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}

/**
 * pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, queue up a work item for @dev like in pm_request_autosuspend().
 */
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev,
	    RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}
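
/*
 * Usage sketch (illustrative only): drivers relying on autosuspend typically
 * pair the helper above with pm_runtime_mark_last_busy() once a piece of work
 * completes, so that the autosuspend delay is measured from the last access:
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 *
 * If autosuspend has not been enabled with pm_runtime_use_autosuspend(), the
 * request is handled as a direct runtime-suspend instead.
 */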

/**
 * pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, invoke the "idle check" callback of @dev and, depending on its
 * return value, set up autosuspend of @dev or suspend it (depending on whether
 * or not autosuspend has been enabled for it).
 *
 * The possible return values of this function are the same as for
 * pm_runtime_idle() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT);
}

/**
 * pm_runtime_put_sync_suspend - Drop device usage counter and suspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, carry out runtime-suspend of @dev synchronously.
 *
 * The possible return values of this function are the same as for
 * pm_runtime_suspend() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT);
}

/**
 * pm_runtime_put_sync_autosuspend - Drop device usage counter and autosuspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, set up autosuspend of @dev or suspend it synchronously (depending
 * on whether or not autosuspend has been enabled for it).
 *
 * The possible return values of this function are the same as for
 * pm_runtime_autosuspend() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
}

/**
 * pm_runtime_set_active - Set runtime PM status to "active".
 * @dev: Target device.
 *
 * Set the runtime PM status of @dev to %RPM_ACTIVE and ensure that
 * dependencies of it will be taken into account.
 *
 * It is not valid to call this function for devices with runtime PM enabled.
 */
static inline int pm_runtime_set_active(struct device *dev)
{
	return __pm_runtime_set_status(dev, RPM_ACTIVE);
}

/**
 * pm_runtime_set_suspended - Set runtime PM status to "suspended".
 * @dev: Target device.
 *
 * Set the runtime PM status of @dev to %RPM_SUSPENDED and ensure that
 * dependencies of it will be taken into account.
 *
 * It is not valid to call this function for devices with runtime PM enabled.
 */
static inline int pm_runtime_set_suspended(struct device *dev)
{
	return __pm_runtime_set_status(dev, RPM_SUSPENDED);
}

/**
 * pm_runtime_disable - Disable runtime PM for a device.
 * @dev: Target device.
 *
 * Prevent the runtime PM framework from working with @dev (by incrementing its
 * "blocking" counter).
 *
 * For each invocation of this function for @dev there must be a matching
 * pm_runtime_enable() call in order for runtime PM to be enabled for it.
 */
static inline void pm_runtime_disable(struct device *dev)
{
	__pm_runtime_disable(dev, true);
}
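
/*
 * Usage sketch (illustrative only): a common ->probe() sequence declares the
 * initial runtime PM status while runtime PM is still disabled and only then
 * enables it, with ->remove() undoing the enable.  The foo_power_up() helper
 * below is hypothetical:
 *
 *	foo_power_up(dev);
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 *
 * and later, in ->remove():
 *
 *	pm_runtime_disable(dev);
 */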

/**
 * pm_runtime_use_autosuspend - Allow autosuspend to be used for a device.
 * @dev: Target device.
 *
 * Allow the runtime PM autosuspend mechanism to be used for @dev whenever
 * requested (or "autosuspend" will be handled as direct runtime-suspend for
 * it).
 *
 * NOTE: It's important to undo this with pm_runtime_dont_use_autosuspend()
 * at driver exit time unless your driver initially enabled pm_runtime
 * with devm_pm_runtime_enable() (which handles it for you).
 */
static inline void pm_runtime_use_autosuspend(struct device *dev)
{
	__pm_runtime_use_autosuspend(dev, true);
}

/**
 * pm_runtime_dont_use_autosuspend - Prevent autosuspend from being used.
 * @dev: Target device.
 *
 * Prevent the runtime PM autosuspend mechanism from being used for @dev which
 * means that "autosuspend" will be handled as direct runtime-suspend for it
 * going forward.
 */
static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
{
	__pm_runtime_use_autosuspend(dev, false);
}
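
/*
 * Usage sketch (illustrative only), following the NOTE in
 * pm_runtime_use_autosuspend() above: when autosuspend is set up manually in
 * ->probe(), it is undone explicitly in ->remove() (devm_pm_runtime_enable()
 * takes care of this automatically).  The 1000 ms delay is an arbitrary
 * example value:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 1000);
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 * and later, in ->remove():
 *
 *	pm_runtime_dont_use_autosuspend(dev);
 *	pm_runtime_disable(dev);
 */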

#endif