1 /*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9 #include <linux/delay.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_runtime.h>
14 #include <linux/pm_domain.h>
15 #include <linux/pm_qos.h>
16 #include <linux/pm_clock.h>
17 #include <linux/slab.h>
18 #include <linux/err.h>
19 #include <linux/sched.h>
20 #include <linux/suspend.h>
21 #include <linux/export.h>
22
23 #include "power.h"
24
25 #define GENPD_RETRY_MAX_MS 250 /* Approximate */
26
27 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
28 ({ \
29 type (*__routine)(struct device *__d); \
30 type __ret = (type)0; \
31 \
32 __routine = genpd->dev_ops.callback; \
33 if (__routine) { \
34 __ret = __routine(dev); \
35 } \
36 __ret; \
37 })
38
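/*
 * For illustration only: GENPD_DEV_CALLBACK(genpd, int, stop, dev), as used
 * by genpd_stop_dev() below, behaves roughly like the following sketch (not
 * the literal preprocessor output):
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = 0;
 *
 *	if (__routine)
 *		__ret = __routine(dev);
 *	// the statement expression evaluates to __ret
 *
 * i.e. a callback that is not set is treated as a successful no-op.
 */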
39 static LIST_HEAD(gpd_list);
40 static DEFINE_MUTEX(gpd_list_lock);
41
42 /*
43 * Get the generic PM domain for a particular struct device.
44 * This validates the struct device pointer, the PM domain pointer,
45 * and checks that the PM domain pointer is a real generic PM domain.
46 * Any failure results in NULL being returned.
47 */
static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
49 {
50 struct generic_pm_domain *genpd = NULL, *gpd;
51
52 if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
53 return NULL;
54
55 mutex_lock(&gpd_list_lock);
56 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
57 if (&gpd->domain == dev->pm_domain) {
58 genpd = gpd;
59 break;
60 }
61 }
62 mutex_unlock(&gpd_list_lock);
63
64 return genpd;
65 }
66
67 /*
68 * This should only be used where we are certain that the pm_domain
69 * attached to the device is a genpd domain.
70 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
72 {
73 if (IS_ERR_OR_NULL(dev->pm_domain))
74 return ERR_PTR(-EINVAL);
75
76 return pd_to_genpd(dev->pm_domain);
77 }
78
static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
80 {
81 return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
82 }
83
static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
85 {
86 return GENPD_DEV_CALLBACK(genpd, int, start, dev);
87 }
88
static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
90 {
91 bool ret = false;
92
93 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
94 ret = !!atomic_dec_and_test(&genpd->sd_count);
95
96 return ret;
97 }
98
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
100 {
101 atomic_inc(&genpd->sd_count);
102 smp_mb__after_atomic();
103 }
104
static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
106 {
107 unsigned int state_idx = genpd->state_idx;
108 ktime_t time_start;
109 s64 elapsed_ns;
110 int ret;
111
112 if (!genpd->power_on)
113 return 0;
114
115 if (!timed)
116 return genpd->power_on(genpd);
117
118 time_start = ktime_get();
119 ret = genpd->power_on(genpd);
120 if (ret)
121 return ret;
122
123 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
124 if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
125 return ret;
126
127 genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
128 genpd->max_off_time_changed = true;
129 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
130 genpd->name, "on", elapsed_ns);
131
132 return ret;
133 }
134
static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
136 {
137 unsigned int state_idx = genpd->state_idx;
138 ktime_t time_start;
139 s64 elapsed_ns;
140 int ret;
141
142 if (!genpd->power_off)
143 return 0;
144
145 if (!timed)
146 return genpd->power_off(genpd);
147
148 time_start = ktime_get();
149 ret = genpd->power_off(genpd);
150 if (ret == -EBUSY)
151 return ret;
152
153 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
154 if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
155 return ret;
156
157 genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
158 genpd->max_off_time_changed = true;
159 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
160 genpd->name, "off", elapsed_ns);
161
162 return ret;
163 }
164
165 /**
166 * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
167 * @genpd: PM domain to power off.
168 *
169 * Queue up the execution of genpd_poweroff() unless it's already been done
170 * before.
171 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
173 {
174 queue_work(pm_wq, &genpd->power_off_work);
175 }
176
177 /**
178 * genpd_poweron - Restore power to a given PM domain and its masters.
179 * @genpd: PM domain to power up.
180 * @depth: nesting count for lockdep.
181 *
182 * Restore power to @genpd and all of its masters so that it is possible to
183 * resume a device belonging to it.
184 */
static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
186 {
187 struct gpd_link *link;
188 int ret = 0;
189
190 if (genpd->status == GPD_STATE_ACTIVE)
191 return 0;
192
193 /*
194 * The list is guaranteed not to change while the loop below is being
195 * executed, unless one of the masters' .power_on() callbacks fiddles
196 * with it.
197 */
198 list_for_each_entry(link, &genpd->slave_links, slave_node) {
199 struct generic_pm_domain *master = link->master;
200
201 genpd_sd_counter_inc(master);
202
203 mutex_lock_nested(&master->lock, depth + 1);
204 ret = genpd_poweron(master, depth + 1);
205 mutex_unlock(&master->lock);
206
207 if (ret) {
208 genpd_sd_counter_dec(master);
209 goto err;
210 }
211 }
212
213 ret = genpd_power_on(genpd, true);
214 if (ret)
215 goto err;
216
217 genpd->status = GPD_STATE_ACTIVE;
218 return 0;
219
220 err:
221 list_for_each_entry_continue_reverse(link,
222 &genpd->slave_links,
223 slave_node) {
224 genpd_sd_counter_dec(link->master);
225 genpd_queue_power_off_work(link->master);
226 }
227
228 return ret;
229 }
230
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
232 unsigned long val, void *ptr)
233 {
234 struct generic_pm_domain_data *gpd_data;
235 struct device *dev;
236
237 gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
238 dev = gpd_data->base.dev;
239
240 for (;;) {
241 struct generic_pm_domain *genpd;
242 struct pm_domain_data *pdd;
243
244 spin_lock_irq(&dev->power.lock);
245
246 pdd = dev->power.subsys_data ?
247 dev->power.subsys_data->domain_data : NULL;
248 if (pdd && pdd->dev) {
249 to_gpd_data(pdd)->td.constraint_changed = true;
250 genpd = dev_to_genpd(dev);
251 } else {
252 genpd = ERR_PTR(-ENODATA);
253 }
254
255 spin_unlock_irq(&dev->power.lock);
256
257 if (!IS_ERR(genpd)) {
258 mutex_lock(&genpd->lock);
259 genpd->max_off_time_changed = true;
260 mutex_unlock(&genpd->lock);
261 }
262
263 dev = dev->parent;
264 if (!dev || dev->power.ignore_children)
265 break;
266 }
267
268 return NOTIFY_DONE;
269 }
270
271 /**
272 * genpd_poweroff - Remove power from a given PM domain.
273 * @genpd: PM domain to power down.
274 * @is_async: PM domain is powered down from a scheduled work
275 *
276 * If all of the @genpd's devices have been suspended and all of its subdomains
277 * have been powered down, remove power from @genpd.
278 */
static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
280 {
281 struct pm_domain_data *pdd;
282 struct gpd_link *link;
283 unsigned int not_suspended = 0;
284
285 /*
286 * Do not try to power off the domain in the following situations:
287 * (1) The domain is already in the "power off" state.
288 * (2) System suspend is in progress.
289 */
290 if (genpd->status == GPD_STATE_POWER_OFF
291 || genpd->prepared_count > 0)
292 return 0;
293
294 if (atomic_read(&genpd->sd_count) > 0)
295 return -EBUSY;
296
297 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
298 enum pm_qos_flags_status stat;
299
300 stat = dev_pm_qos_flags(pdd->dev,
301 PM_QOS_FLAG_NO_POWER_OFF
302 | PM_QOS_FLAG_REMOTE_WAKEUP);
303 if (stat > PM_QOS_FLAGS_NONE)
304 return -EBUSY;
305
306 if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe)
307 not_suspended++;
308 }
309
310 if (not_suspended > 1 || (not_suspended == 1 && is_async))
311 return -EBUSY;
312
313 if (genpd->gov && genpd->gov->power_down_ok) {
314 if (!genpd->gov->power_down_ok(&genpd->domain))
315 return -EAGAIN;
316 }
317
318 if (genpd->power_off) {
319 int ret;
320
321 if (atomic_read(&genpd->sd_count) > 0)
322 return -EBUSY;
323
324 /*
325 * If sd_count > 0 at this point, one of the subdomains hasn't
326 * managed to call genpd_poweron() for the master yet after
327 * incrementing it. In that case genpd_poweron() will wait
328 * for us to drop the lock, so we can call .power_off() and let
329 * the genpd_poweron() restore power for us (this shouldn't
330 * happen very often).
331 */
332 ret = genpd_power_off(genpd, true);
333 if (ret)
334 return ret;
335 }
336
337 genpd->status = GPD_STATE_POWER_OFF;
338
339 list_for_each_entry(link, &genpd->slave_links, slave_node) {
340 genpd_sd_counter_dec(link->master);
341 genpd_queue_power_off_work(link->master);
342 }
343
344 return 0;
345 }
346
347 /**
348 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
349 * @work: Work structure used for scheduling the execution of this function.
350 */
static void genpd_power_off_work_fn(struct work_struct *work)
352 {
353 struct generic_pm_domain *genpd;
354
355 genpd = container_of(work, struct generic_pm_domain, power_off_work);
356
357 mutex_lock(&genpd->lock);
358 genpd_poweroff(genpd, true);
359 mutex_unlock(&genpd->lock);
360 }
361
362 /**
363 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
364 * @dev: Device to handle.
365 */
static int __genpd_runtime_suspend(struct device *dev)
367 {
368 int (*cb)(struct device *__dev);
369
370 if (dev->type && dev->type->pm)
371 cb = dev->type->pm->runtime_suspend;
372 else if (dev->class && dev->class->pm)
373 cb = dev->class->pm->runtime_suspend;
374 else if (dev->bus && dev->bus->pm)
375 cb = dev->bus->pm->runtime_suspend;
376 else
377 cb = NULL;
378
379 if (!cb && dev->driver && dev->driver->pm)
380 cb = dev->driver->pm->runtime_suspend;
381
382 return cb ? cb(dev) : 0;
383 }
384
385 /**
386 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
387 * @dev: Device to handle.
388 */
static int __genpd_runtime_resume(struct device *dev)
390 {
391 int (*cb)(struct device *__dev);
392
393 if (dev->type && dev->type->pm)
394 cb = dev->type->pm->runtime_resume;
395 else if (dev->class && dev->class->pm)
396 cb = dev->class->pm->runtime_resume;
397 else if (dev->bus && dev->bus->pm)
398 cb = dev->bus->pm->runtime_resume;
399 else
400 cb = NULL;
401
402 if (!cb && dev->driver && dev->driver->pm)
403 cb = dev->driver->pm->runtime_resume;
404
405 return cb ? cb(dev) : 0;
406 }
407
408 /**
409 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
410 * @dev: Device to suspend.
411 *
412 * Carry out a runtime suspend of a device under the assumption that its
413 * pm_domain field points to the domain member of an object of type
414 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
415 */
static int genpd_runtime_suspend(struct device *dev)
417 {
418 struct generic_pm_domain *genpd;
419 bool (*suspend_ok)(struct device *__dev);
420 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
421 bool runtime_pm = pm_runtime_enabled(dev);
422 ktime_t time_start;
423 s64 elapsed_ns;
424 int ret;
425
426 dev_dbg(dev, "%s()\n", __func__);
427
428 genpd = dev_to_genpd(dev);
429 if (IS_ERR(genpd))
430 return -EINVAL;
431
432 /*
433 * A runtime PM centric subsystem/driver may re-use the runtime PM
434 * callbacks for other purposes than runtime PM. In those scenarios
435 * runtime PM is disabled. Under these circumstances, we shall skip
436 * validating/measuring the PM QoS latency.
437 */
438 suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
439 if (runtime_pm && suspend_ok && !suspend_ok(dev))
440 return -EBUSY;
441
442 /* Measure suspend latency. */
443 if (runtime_pm)
444 time_start = ktime_get();
445
446 ret = __genpd_runtime_suspend(dev);
447 if (ret)
448 return ret;
449
450 ret = genpd_stop_dev(genpd, dev);
451 if (ret) {
452 __genpd_runtime_resume(dev);
453 return ret;
454 }
455
456 /* Update suspend latency value if the measured time exceeds it. */
457 if (runtime_pm) {
458 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
459 if (elapsed_ns > td->suspend_latency_ns) {
460 td->suspend_latency_ns = elapsed_ns;
461 dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
462 elapsed_ns);
463 genpd->max_off_time_changed = true;
464 td->constraint_changed = true;
465 }
466 }
467
468 /*
469 * If power.irq_safe is set, this routine will be run with interrupts
470 * off, so it can't use mutexes.
471 */
472 if (dev->power.irq_safe)
473 return 0;
474
475 mutex_lock(&genpd->lock);
476 genpd_poweroff(genpd, false);
477 mutex_unlock(&genpd->lock);
478
479 return 0;
480 }
481
482 /**
483 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
484 * @dev: Device to resume.
485 *
486 * Carry out a runtime resume of a device under the assumption that its
487 * pm_domain field points to the domain member of an object of type
488 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
489 */
static int genpd_runtime_resume(struct device *dev)
491 {
492 struct generic_pm_domain *genpd;
493 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
494 bool runtime_pm = pm_runtime_enabled(dev);
495 ktime_t time_start;
496 s64 elapsed_ns;
497 int ret;
498 bool timed = true;
499
500 dev_dbg(dev, "%s()\n", __func__);
501
502 genpd = dev_to_genpd(dev);
503 if (IS_ERR(genpd))
504 return -EINVAL;
505
506 /* If power.irq_safe, the PM domain is never powered off. */
507 if (dev->power.irq_safe) {
508 timed = false;
509 goto out;
510 }
511
512 mutex_lock(&genpd->lock);
513 ret = genpd_poweron(genpd, 0);
514 mutex_unlock(&genpd->lock);
515
516 if (ret)
517 return ret;
518
519 out:
520 /* Measure resume latency. */
521 if (timed && runtime_pm)
522 time_start = ktime_get();
523
524 ret = genpd_start_dev(genpd, dev);
525 if (ret)
526 goto err_poweroff;
527
528 ret = __genpd_runtime_resume(dev);
529 if (ret)
530 goto err_stop;
531
532 /* Update resume latency value if the measured time exceeds it. */
533 if (timed && runtime_pm) {
534 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
535 if (elapsed_ns > td->resume_latency_ns) {
536 td->resume_latency_ns = elapsed_ns;
537 dev_dbg(dev, "resume latency exceeded, %lld ns\n",
538 elapsed_ns);
539 genpd->max_off_time_changed = true;
540 td->constraint_changed = true;
541 }
542 }
543
544 return 0;
545
546 err_stop:
547 genpd_stop_dev(genpd, dev);
548 err_poweroff:
549 if (!dev->power.irq_safe) {
550 mutex_lock(&genpd->lock);
551 genpd_poweroff(genpd, 0);
552 mutex_unlock(&genpd->lock);
553 }
554
555 return ret;
556 }
557
558 static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
560 {
561 pd_ignore_unused = true;
562 return 1;
563 }
564 __setup("pd_ignore_unused", pd_ignore_unused_setup);
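
/*
 * Illustrative only: booting with "pd_ignore_unused" on the kernel command
 * line (the console= and root= values below are just placeholders) keeps
 * otherwise-unused domains powered, since genpd_poweroff_unused() below then
 * skips queueing the power-off work:
 *
 *	console=ttyS0,115200 root=/dev/mmcblk0p2 pd_ignore_unused
 */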
565
566 /**
567 * genpd_poweroff_unused - Power off all PM domains with no devices in use.
568 */
static int __init genpd_poweroff_unused(void)
570 {
571 struct generic_pm_domain *genpd;
572
573 if (pd_ignore_unused) {
574 pr_warn("genpd: Not disabling unused power domains\n");
575 return 0;
576 }
577
578 mutex_lock(&gpd_list_lock);
579
580 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
581 genpd_queue_power_off_work(genpd);
582
583 mutex_unlock(&gpd_list_lock);
584
585 return 0;
586 }
587 late_initcall(genpd_poweroff_unused);
588
589 #if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
590
591 /**
592 * pm_genpd_present - Check if the given PM domain has been initialized.
593 * @genpd: PM domain to check.
594 */
static bool pm_genpd_present(const struct generic_pm_domain *genpd)
596 {
597 const struct generic_pm_domain *gpd;
598
599 if (IS_ERR_OR_NULL(genpd))
600 return false;
601
602 list_for_each_entry(gpd, &gpd_list, gpd_list_node)
603 if (gpd == genpd)
604 return true;
605
606 return false;
607 }
608
609 #endif
610
611 #ifdef CONFIG_PM_SLEEP
612
static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
614 struct device *dev)
615 {
616 return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
617 }
618
619 /**
620 * genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
621 * @genpd: PM domain to power off, if possible.
622 *
623 * Check if the given PM domain can be powered off (during system suspend or
624 * hibernation) and do that if so. Also, in that case propagate to its masters.
625 *
626 * This function is only called in "noirq" and "syscore" stages of system power
627 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
628 * executed sequentially, so it is guaranteed that it will never run twice in
629 * parallel).
630 */
static void genpd_sync_poweroff(struct generic_pm_domain *genpd)
632 {
633 struct gpd_link *link;
634
635 if (genpd->status == GPD_STATE_POWER_OFF)
636 return;
637
638 if (genpd->suspended_count != genpd->device_count
639 || atomic_read(&genpd->sd_count) > 0)
640 return;
641
642 /* Choose the deepest state when suspending */
643 genpd->state_idx = genpd->state_count - 1;
644 genpd_power_off(genpd, false);
645
646 genpd->status = GPD_STATE_POWER_OFF;
647
648 list_for_each_entry(link, &genpd->slave_links, slave_node) {
649 genpd_sd_counter_dec(link->master);
650 genpd_sync_poweroff(link->master);
651 }
652 }
653
654 /**
655 * genpd_sync_poweron - Synchronously power on a PM domain and its masters.
656 * @genpd: PM domain to power on.
657 *
658 * This function is only called in "noirq" and "syscore" stages of system power
659 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
660 * executed sequentially, so it is guaranteed that it will never run twice in
661 * parallel).
662 */
static void genpd_sync_poweron(struct generic_pm_domain *genpd)
664 {
665 struct gpd_link *link;
666
667 if (genpd->status == GPD_STATE_ACTIVE)
668 return;
669
670 list_for_each_entry(link, &genpd->slave_links, slave_node) {
671 genpd_sync_poweron(link->master);
672 genpd_sd_counter_inc(link->master);
673 }
674
675 genpd_power_on(genpd, false);
676
677 genpd->status = GPD_STATE_ACTIVE;
678 }
679
680 /**
681 * resume_needed - Check whether to resume a device before system suspend.
682 * @dev: Device to check.
683 * @genpd: PM domain the device belongs to.
684 *
685 * There are two cases in which a device that can wake up the system from sleep
686 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
687 * to wake up the system and it has to remain active for this purpose while the
688 * system is in the sleep state and (2) if the device is not enabled to wake up
689 * the system from sleep states and it generally doesn't generate wakeup signals
690 * by itself (those signals are generated on its behalf by other parts of the
691 * system). In the latter case it may be necessary to reconfigure the device's
692 * wakeup settings during system suspend, because it may have been set up to
693 * signal remote wakeup from the system's working state as needed by runtime PM.
694 * Return 'true' in either of the above cases.
695 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
697 {
698 bool active_wakeup;
699
700 if (!device_can_wakeup(dev))
701 return false;
702
703 active_wakeup = genpd_dev_active_wakeup(genpd, dev);
704 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
705 }
706
707 /**
708 * pm_genpd_prepare - Start power transition of a device in a PM domain.
709 * @dev: Device to start the transition of.
710 *
711 * Start a power transition of a device (during a system-wide power transition)
712 * under the assumption that its pm_domain field points to the domain member of
713 * an object of type struct generic_pm_domain representing a PM domain
714 * consisting of I/O devices.
715 */
static int pm_genpd_prepare(struct device *dev)
717 {
718 struct generic_pm_domain *genpd;
719 int ret;
720
721 dev_dbg(dev, "%s()\n", __func__);
722
723 genpd = dev_to_genpd(dev);
724 if (IS_ERR(genpd))
725 return -EINVAL;
726
727 /*
728 * If a wakeup request is pending for the device, it should be woken up
729 * at this point and a system wakeup event should be reported if it's
730 * set up to wake up the system from sleep states.
731 */
732 if (resume_needed(dev, genpd))
733 pm_runtime_resume(dev);
734
735 mutex_lock(&genpd->lock);
736
737 if (genpd->prepared_count++ == 0)
738 genpd->suspended_count = 0;
739
740 mutex_unlock(&genpd->lock);
741
742 ret = pm_generic_prepare(dev);
743 if (ret) {
744 mutex_lock(&genpd->lock);
745
746 genpd->prepared_count--;
747
748 mutex_unlock(&genpd->lock);
749 }
750
751 return ret;
752 }
753
754 /**
755 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
756 * @dev: Device to suspend.
757 *
758 * Stop the device and remove power from the domain if all devices in it have
759 * been stopped.
760 */
static int pm_genpd_suspend_noirq(struct device *dev)
762 {
763 struct generic_pm_domain *genpd;
764 int ret;
765
766 dev_dbg(dev, "%s()\n", __func__);
767
768 genpd = dev_to_genpd(dev);
769 if (IS_ERR(genpd))
770 return -EINVAL;
771
772 if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
773 return 0;
774
775 if (genpd->dev_ops.stop && genpd->dev_ops.start) {
776 ret = pm_runtime_force_suspend(dev);
777 if (ret)
778 return ret;
779 }
780
781 /*
782 * Since all of the "noirq" callbacks are executed sequentially, it is
783 * guaranteed that this function will never run twice in parallel for
784 * the same PM domain, so it is not necessary to use locking here.
785 */
786 genpd->suspended_count++;
787 genpd_sync_poweroff(genpd);
788
789 return 0;
790 }
791
792 /**
793 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
794 * @dev: Device to resume.
795 *
796 * Restore power to the device's PM domain, if necessary, and start the device.
797 */
static int pm_genpd_resume_noirq(struct device *dev)
799 {
800 struct generic_pm_domain *genpd;
801 int ret = 0;
802
803 dev_dbg(dev, "%s()\n", __func__);
804
805 genpd = dev_to_genpd(dev);
806 if (IS_ERR(genpd))
807 return -EINVAL;
808
809 if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
810 return 0;
811
812 /*
813 * Since all of the "noirq" callbacks are executed sequentially, it is
814 * guaranteed that this function will never run twice in parallel for
815 * the same PM domain, so it is not necessary to use locking here.
816 */
817 genpd_sync_poweron(genpd);
818 genpd->suspended_count--;
819
820 if (genpd->dev_ops.stop && genpd->dev_ops.start)
821 ret = pm_runtime_force_resume(dev);
822
823 return ret;
824 }
825
826 /**
827 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
828 * @dev: Device to freeze.
829 *
830 * Carry out a late freeze of a device under the assumption that its
831 * pm_domain field points to the domain member of an object of type
832 * struct generic_pm_domain representing a power domain consisting of I/O
833 * devices.
834 */
static int pm_genpd_freeze_noirq(struct device *dev)
836 {
837 struct generic_pm_domain *genpd;
838 int ret = 0;
839
840 dev_dbg(dev, "%s()\n", __func__);
841
842 genpd = dev_to_genpd(dev);
843 if (IS_ERR(genpd))
844 return -EINVAL;
845
846 if (genpd->dev_ops.stop && genpd->dev_ops.start)
847 ret = pm_runtime_force_suspend(dev);
848
849 return ret;
850 }
851
852 /**
853 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
854 * @dev: Device to thaw.
855 *
856 * Start the device, unless power has been removed from the domain already
857 * before the system transition.
858 */
static int pm_genpd_thaw_noirq(struct device *dev)
860 {
861 struct generic_pm_domain *genpd;
862 int ret = 0;
863
864 dev_dbg(dev, "%s()\n", __func__);
865
866 genpd = dev_to_genpd(dev);
867 if (IS_ERR(genpd))
868 return -EINVAL;
869
870 if (genpd->dev_ops.stop && genpd->dev_ops.start)
871 ret = pm_runtime_force_resume(dev);
872
873 return ret;
874 }
875
876 /**
877 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
878 * @dev: Device to resume.
879 *
880 * Make sure the domain will be in the same power state as before the
881 * hibernation the system is resuming from and start the device if necessary.
882 */
static int pm_genpd_restore_noirq(struct device *dev)
884 {
885 struct generic_pm_domain *genpd;
886 int ret = 0;
887
888 dev_dbg(dev, "%s()\n", __func__);
889
890 genpd = dev_to_genpd(dev);
891 if (IS_ERR(genpd))
892 return -EINVAL;
893
894 /*
895 * Since all of the "noirq" callbacks are executed sequentially, it is
896 * guaranteed that this function will never run twice in parallel for
897 * the same PM domain, so it is not necessary to use locking here.
898 *
899 * At this point suspended_count == 0 means we are being run for the
900 * first time for the given domain in the present cycle.
901 */
902 if (genpd->suspended_count++ == 0)
903 /*
904 * The boot kernel might put the domain into arbitrary state,
905 * so make it appear as powered off to genpd_sync_poweron(),
906 * so that it tries to power it on in case it was really off.
907 */
908 genpd->status = GPD_STATE_POWER_OFF;
909
910 genpd_sync_poweron(genpd);
911
912 if (genpd->dev_ops.stop && genpd->dev_ops.start)
913 ret = pm_runtime_force_resume(dev);
914
915 return ret;
916 }
917
918 /**
919 * pm_genpd_complete - Complete power transition of a device in a power domain.
920 * @dev: Device to complete the transition of.
921 *
922 * Complete a power transition of a device (during a system-wide power
923 * transition) under the assumption that its pm_domain field points to the
924 * domain member of an object of type struct generic_pm_domain representing
925 * a power domain consisting of I/O devices.
926 */
static void pm_genpd_complete(struct device *dev)
928 {
929 struct generic_pm_domain *genpd;
930
931 dev_dbg(dev, "%s()\n", __func__);
932
933 genpd = dev_to_genpd(dev);
934 if (IS_ERR(genpd))
935 return;
936
937 pm_generic_complete(dev);
938
939 mutex_lock(&genpd->lock);
940
941 genpd->prepared_count--;
942 if (!genpd->prepared_count)
943 genpd_queue_power_off_work(genpd);
944
945 mutex_unlock(&genpd->lock);
946 }
947
/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: If true, power the device's PM domain off; if false, power it on.
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
956 {
957 struct generic_pm_domain *genpd;
958
959 genpd = dev_to_genpd(dev);
960 if (!pm_genpd_present(genpd))
961 return;
962
963 if (suspend) {
964 genpd->suspended_count++;
965 genpd_sync_poweroff(genpd);
966 } else {
967 genpd_sync_poweron(genpd);
968 genpd->suspended_count--;
969 }
970 }
971
void pm_genpd_syscore_poweroff(struct device *dev)
973 {
974 genpd_syscore_switch(dev, true);
975 }
976 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
977
void pm_genpd_syscore_poweron(struct device *dev)
979 {
980 genpd_syscore_switch(dev, false);
981 }
982 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
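
/*
 * Minimal usage sketch (a hypothetical timer driver, not part of this file)
 * for the two helpers above; "my_timer_dev" is an assumed always-on device
 * that belongs to a generic PM domain:
 *
 *	static int my_timer_syscore_suspend(void)
 *	{
 *		pm_genpd_syscore_poweroff(my_timer_dev);
 *		return 0;
 *	}
 *
 *	static void my_timer_syscore_resume(void)
 *	{
 *		pm_genpd_syscore_poweron(my_timer_dev);
 *	}
 *
 *	static struct syscore_ops my_timer_syscore_ops = {
 *		.suspend = my_timer_syscore_suspend,
 *		.resume  = my_timer_syscore_resume,
 *	};
 *
 * register_syscore_ops(&my_timer_syscore_ops) would then run these after the
 * "noirq" phase on suspend and before it on resume.
 */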
983
984 #else /* !CONFIG_PM_SLEEP */
985
986 #define pm_genpd_prepare NULL
987 #define pm_genpd_suspend_noirq NULL
988 #define pm_genpd_resume_noirq NULL
989 #define pm_genpd_freeze_noirq NULL
990 #define pm_genpd_thaw_noirq NULL
991 #define pm_genpd_restore_noirq NULL
992 #define pm_genpd_complete NULL
993
994 #endif /* CONFIG_PM_SLEEP */
995
static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
997 struct generic_pm_domain *genpd,
998 struct gpd_timing_data *td)
999 {
1000 struct generic_pm_domain_data *gpd_data;
1001 int ret;
1002
1003 ret = dev_pm_get_subsys_data(dev);
1004 if (ret)
1005 return ERR_PTR(ret);
1006
1007 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1008 if (!gpd_data) {
1009 ret = -ENOMEM;
1010 goto err_put;
1011 }
1012
1013 if (td)
1014 gpd_data->td = *td;
1015
1016 gpd_data->base.dev = dev;
1017 gpd_data->td.constraint_changed = true;
1018 gpd_data->td.effective_constraint_ns = -1;
1019 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1020
1021 spin_lock_irq(&dev->power.lock);
1022
1023 if (dev->power.subsys_data->domain_data) {
1024 ret = -EINVAL;
1025 goto err_free;
1026 }
1027
1028 dev->power.subsys_data->domain_data = &gpd_data->base;
1029
1030 spin_unlock_irq(&dev->power.lock);
1031
1032 return gpd_data;
1033
1034 err_free:
1035 spin_unlock_irq(&dev->power.lock);
1036 kfree(gpd_data);
1037 err_put:
1038 dev_pm_put_subsys_data(dev);
1039 return ERR_PTR(ret);
1040 }
1041
static void genpd_free_dev_data(struct device *dev,
1043 struct generic_pm_domain_data *gpd_data)
1044 {
1045 spin_lock_irq(&dev->power.lock);
1046
1047 dev->power.subsys_data->domain_data = NULL;
1048
1049 spin_unlock_irq(&dev->power.lock);
1050
1051 kfree(gpd_data);
1052 dev_pm_put_subsys_data(dev);
1053 }
1054
static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1056 struct gpd_timing_data *td)
1057 {
1058 struct generic_pm_domain_data *gpd_data;
1059 int ret = 0;
1060
1061 dev_dbg(dev, "%s()\n", __func__);
1062
1063 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1064 return -EINVAL;
1065
1066 gpd_data = genpd_alloc_dev_data(dev, genpd, td);
1067 if (IS_ERR(gpd_data))
1068 return PTR_ERR(gpd_data);
1069
1070 mutex_lock(&genpd->lock);
1071
1072 if (genpd->prepared_count > 0) {
1073 ret = -EAGAIN;
1074 goto out;
1075 }
1076
1077 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1078 if (ret)
1079 goto out;
1080
1081 dev_pm_domain_set(dev, &genpd->domain);
1082
1083 genpd->device_count++;
1084 genpd->max_off_time_changed = true;
1085
1086 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1087
1088 out:
1089 mutex_unlock(&genpd->lock);
1090
1091 if (ret)
1092 genpd_free_dev_data(dev, gpd_data);
1093 else
1094 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1095
1096 return ret;
1097 }
1098
1099 /**
1100 * __pm_genpd_add_device - Add a device to an I/O PM domain.
1101 * @genpd: PM domain to add the device to.
1102 * @dev: Device to be added.
1103 * @td: Set of PM QoS timing parameters to attach to the device.
1104 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1106 struct gpd_timing_data *td)
1107 {
1108 int ret;
1109
1110 mutex_lock(&gpd_list_lock);
1111 ret = genpd_add_device(genpd, dev, td);
1112 mutex_unlock(&gpd_list_lock);
1113
1114 return ret;
1115 }
1116 EXPORT_SYMBOL_GPL(__pm_genpd_add_device);
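
/*
 * Illustrative only (caller code, not part of this file): a device can be
 * added to an already-initialized domain with NULL timing data, in which
 * case the per-device latencies start out zeroed and are updated from
 * measured values by genpd_runtime_suspend()/genpd_runtime_resume():
 *
 *	ret = __pm_genpd_add_device(&my_pd, &pdev->dev, NULL);
 *	if (ret)
 *		dev_err(&pdev->dev, "failed to add to PM domain: %d\n", ret);
 *
 * "my_pd" and "pdev" are assumed to be provided by the caller.
 */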
1117
static int genpd_remove_device(struct generic_pm_domain *genpd,
1119 struct device *dev)
1120 {
1121 struct generic_pm_domain_data *gpd_data;
1122 struct pm_domain_data *pdd;
1123 int ret = 0;
1124
1125 dev_dbg(dev, "%s()\n", __func__);
1126
1127 pdd = dev->power.subsys_data->domain_data;
1128 gpd_data = to_gpd_data(pdd);
1129 dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1130
1131 mutex_lock(&genpd->lock);
1132
1133 if (genpd->prepared_count > 0) {
1134 ret = -EAGAIN;
1135 goto out;
1136 }
1137
1138 genpd->device_count--;
1139 genpd->max_off_time_changed = true;
1140
1141 if (genpd->detach_dev)
1142 genpd->detach_dev(genpd, dev);
1143
1144 dev_pm_domain_set(dev, NULL);
1145
1146 list_del_init(&pdd->list_node);
1147
1148 mutex_unlock(&genpd->lock);
1149
1150 genpd_free_dev_data(dev, gpd_data);
1151
1152 return 0;
1153
1154 out:
1155 mutex_unlock(&genpd->lock);
1156 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1157
1158 return ret;
1159 }
1160
1161 /**
1162 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1163 * @genpd: PM domain to remove the device from.
1164 * @dev: Device to be removed.
1165 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1167 struct device *dev)
1168 {
1169 if (!genpd || genpd != genpd_lookup_dev(dev))
1170 return -EINVAL;
1171
1172 return genpd_remove_device(genpd, dev);
1173 }
1174 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1175
static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1177 struct generic_pm_domain *subdomain)
1178 {
1179 struct gpd_link *link, *itr;
1180 int ret = 0;
1181
1182 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1183 || genpd == subdomain)
1184 return -EINVAL;
1185
1186 link = kzalloc(sizeof(*link), GFP_KERNEL);
1187 if (!link)
1188 return -ENOMEM;
1189
1190 mutex_lock(&subdomain->lock);
1191 mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
1192
1193 if (genpd->status == GPD_STATE_POWER_OFF
1194 && subdomain->status != GPD_STATE_POWER_OFF) {
1195 ret = -EINVAL;
1196 goto out;
1197 }
1198
1199 list_for_each_entry(itr, &genpd->master_links, master_node) {
1200 if (itr->slave == subdomain && itr->master == genpd) {
1201 ret = -EINVAL;
1202 goto out;
1203 }
1204 }
1205
1206 link->master = genpd;
1207 list_add_tail(&link->master_node, &genpd->master_links);
1208 link->slave = subdomain;
1209 list_add_tail(&link->slave_node, &subdomain->slave_links);
1210 if (subdomain->status != GPD_STATE_POWER_OFF)
1211 genpd_sd_counter_inc(genpd);
1212
1213 out:
1214 mutex_unlock(&genpd->lock);
1215 mutex_unlock(&subdomain->lock);
1216 if (ret)
1217 kfree(link);
1218 return ret;
1219 }
1220
1221 /**
1222 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1223 * @genpd: Master PM domain to add the subdomain to.
1224 * @subdomain: Subdomain to be added.
1225 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1227 struct generic_pm_domain *subdomain)
1228 {
1229 int ret;
1230
1231 mutex_lock(&gpd_list_lock);
1232 ret = genpd_add_subdomain(genpd, subdomain);
1233 mutex_unlock(&gpd_list_lock);
1234
1235 return ret;
1236 }
1237 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
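
/*
 * Minimal sketch (SoC setup code, not part of this file) of building a
 * hierarchy in which "pd_top" is the master of the "pd_gpu" subdomain; both
 * are assumed to have been registered with pm_genpd_init() already:
 *
 *	ret = pm_genpd_add_subdomain(&pd_top, &pd_gpu);
 *	if (ret)
 *		pr_err("failed to add %s as subdomain of %s: %d\n",
 *		       pd_gpu.name, pd_top.name, ret);
 *
 * Powering on pd_gpu then powers on pd_top first (see genpd_poweron()), and
 * pd_top cannot be powered off while its sd_count is non-zero.
 */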
1238
1239 /**
1240 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1241 * @genpd: Master PM domain to remove the subdomain from.
1242 * @subdomain: Subdomain to be removed.
1243 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1245 struct generic_pm_domain *subdomain)
1246 {
1247 struct gpd_link *l, *link;
1248 int ret = -EINVAL;
1249
1250 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1251 return -EINVAL;
1252
1253 mutex_lock(&subdomain->lock);
1254 mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
1255
1256 if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
1257 pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
1258 subdomain->name);
1259 ret = -EBUSY;
1260 goto out;
1261 }
1262
1263 list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
1264 if (link->slave != subdomain)
1265 continue;
1266
1267 list_del(&link->master_node);
1268 list_del(&link->slave_node);
1269 kfree(link);
1270 if (subdomain->status != GPD_STATE_POWER_OFF)
1271 genpd_sd_counter_dec(genpd);
1272
1273 ret = 0;
1274 break;
1275 }
1276
1277 out:
1278 mutex_unlock(&genpd->lock);
1279 mutex_unlock(&subdomain->lock);
1280
1281 return ret;
1282 }
1283 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1284
1285 /**
1286 * pm_genpd_init - Initialize a generic I/O PM domain object.
1287 * @genpd: PM domain object to initialize.
1288 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial power state of the domain (true means it starts powered off).
1290 *
1291 * Returns 0 on successful initialization, else a negative error code.
1292 */
int pm_genpd_init(struct generic_pm_domain *genpd,
1294 struct dev_power_governor *gov, bool is_off)
1295 {
1296 if (IS_ERR_OR_NULL(genpd))
1297 return -EINVAL;
1298
1299 INIT_LIST_HEAD(&genpd->master_links);
1300 INIT_LIST_HEAD(&genpd->slave_links);
1301 INIT_LIST_HEAD(&genpd->dev_list);
1302 mutex_init(&genpd->lock);
1303 genpd->gov = gov;
1304 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1305 atomic_set(&genpd->sd_count, 0);
1306 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1307 genpd->device_count = 0;
1308 genpd->max_off_time_ns = -1;
1309 genpd->max_off_time_changed = true;
1310 genpd->provider = NULL;
1311 genpd->has_provider = false;
1312 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
1313 genpd->domain.ops.runtime_resume = genpd_runtime_resume;
1314 genpd->domain.ops.prepare = pm_genpd_prepare;
1315 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1316 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1317 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1318 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1319 genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
1320 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1321 genpd->domain.ops.complete = pm_genpd_complete;
1322
1323 if (genpd->flags & GENPD_FLAG_PM_CLK) {
1324 genpd->dev_ops.stop = pm_clk_suspend;
1325 genpd->dev_ops.start = pm_clk_resume;
1326 }
1327
1328 if (genpd->state_idx >= GENPD_MAX_NUM_STATES) {
1329 pr_warn("Initial state index out of bounds.\n");
1330 genpd->state_idx = GENPD_MAX_NUM_STATES - 1;
1331 }
1332
1333 if (genpd->state_count > GENPD_MAX_NUM_STATES) {
1334 pr_warn("Limiting states to %d\n", GENPD_MAX_NUM_STATES);
1335 genpd->state_count = GENPD_MAX_NUM_STATES;
1336 }
1337
1338 /* Use only one "off" state if there were no states declared */
1339 if (genpd->state_count == 0)
1340 genpd->state_count = 1;
1341
1342 mutex_lock(&gpd_list_lock);
1343 list_add(&genpd->gpd_list_node, &gpd_list);
1344 mutex_unlock(&gpd_list_lock);
1345
1346 return 0;
1347 }
1348 EXPORT_SYMBOL_GPL(pm_genpd_init);
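
/*
 * Illustrative only (provider code, not part of this file): a typical caller
 * fills in a name, the power_on/power_off callbacks and, if the devices are
 * clock-gated through the PM clock framework, GENPD_FLAG_PM_CLK (which wires
 * dev_ops.stop/start to pm_clk_suspend()/pm_clk_resume() above), then
 * registers the domain as initially powered off:
 *
 *	static int my_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		return my_pmic_enable();	// assumed board helper
 *	}
 *
 *	static int my_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		return my_pmic_disable();	// assumed board helper
 *	}
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my-pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *		.flags = GENPD_FLAG_PM_CLK,
 *	};
 *
 *	ret = pm_genpd_init(&my_pd, NULL, true);
 *
 * A NULL governor is valid, and is_off == true means the domain starts in
 * GPD_STATE_POWER_OFF.
 */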
1349
static int genpd_remove(struct generic_pm_domain *genpd)
1351 {
1352 struct gpd_link *l, *link;
1353
1354 if (IS_ERR_OR_NULL(genpd))
1355 return -EINVAL;
1356
1357 mutex_lock(&genpd->lock);
1358
1359 if (genpd->has_provider) {
1360 mutex_unlock(&genpd->lock);
1361 pr_err("Provider present, unable to remove %s\n", genpd->name);
1362 return -EBUSY;
1363 }
1364
1365 if (!list_empty(&genpd->master_links) || genpd->device_count) {
1366 mutex_unlock(&genpd->lock);
1367 pr_err("%s: unable to remove %s\n", __func__, genpd->name);
1368 return -EBUSY;
1369 }
1370
1371 list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
1372 list_del(&link->master_node);
1373 list_del(&link->slave_node);
1374 kfree(link);
1375 }
1376
1377 list_del(&genpd->gpd_list_node);
1378 mutex_unlock(&genpd->lock);
1379 cancel_work_sync(&genpd->power_off_work);
1380 pr_debug("%s: removed %s\n", __func__, genpd->name);
1381
1382 return 0;
1383 }
1384
1385 /**
1386 * pm_genpd_remove - Remove a generic I/O PM domain
1387 * @genpd: Pointer to PM domain that is to be removed.
1388 *
1389 * To remove the PM domain, this function:
1390 * - Removes the PM domain as a subdomain to any parent domains,
1391 * if it was added.
1392 * - Removes the PM domain from the list of registered PM domains.
1393 *
1394 * The PM domain will only be removed, if the associated provider has
1395 * been removed, it is not a parent to any other PM domain and has no
1396 * devices associated with it.
1397 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
1399 {
1400 int ret;
1401
1402 mutex_lock(&gpd_list_lock);
1403 ret = genpd_remove(genpd);
1404 mutex_unlock(&gpd_list_lock);
1405
1406 return ret;
1407 }
1408 EXPORT_SYMBOL_GPL(pm_genpd_remove);
1409
1410 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1411
1412 typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
1413 void *data);
1414
1415 /*
1416 * Device Tree based PM domain providers.
1417 *
1418 * The code below implements generic device tree based PM domain providers that
1419 * bind device tree nodes with generic PM domains registered in the system.
1420 *
1421 * Any driver that registers generic PM domains and needs to support binding of
1422 * devices to these domains is supposed to register a PM domain provider, which
1423 * maps a PM domain specifier retrieved from the device tree to a PM domain.
1424 *
1425 * Two simple mapping functions have been provided for convenience:
1426 * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1427 * - genpd_xlate_onecell() for mapping of multiple PM domains per node by
1428 * index.
1429 */
1430
1431 /**
1432 * struct of_genpd_provider - PM domain provider registration structure
1433 * @link: Entry in global list of PM domain providers
1434 * @node: Pointer to device tree node of PM domain provider
1435 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1436 * into a PM domain.
1437 * @data: context pointer to be passed into @xlate callback
1438 */
1439 struct of_genpd_provider {
1440 struct list_head link;
1441 struct device_node *node;
1442 genpd_xlate_t xlate;
1443 void *data;
1444 };
1445
1446 /* List of registered PM domain providers. */
1447 static LIST_HEAD(of_genpd_providers);
1448 /* Mutex to protect the list above. */
1449 static DEFINE_MUTEX(of_genpd_mutex);
1450
1451 /**
1452 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
1453 * @genpdspec: OF phandle args to map into a PM domain
1454 * @data: xlate function private data - pointer to struct generic_pm_domain
1455 *
1456 * This is a generic xlate function that can be used to model PM domains that
1457 * have their own device tree nodes. The private data of xlate function needs
1458 * to be a valid pointer to struct generic_pm_domain.
1459 */
static struct generic_pm_domain *genpd_xlate_simple(
1461 struct of_phandle_args *genpdspec,
1462 void *data)
1463 {
1464 if (genpdspec->args_count != 0)
1465 return ERR_PTR(-EINVAL);
1466 return data;
1467 }
1468
1469 /**
1470 * genpd_xlate_onecell() - Xlate function using a single index.
1471 * @genpdspec: OF phandle args to map into a PM domain
1472 * @data: xlate function private data - pointer to struct genpd_onecell_data
1473 *
1474 * This is a generic xlate function that can be used to model simple PM domain
1475 * controllers that have one device tree node and provide multiple PM domains.
1476 * A single cell is used as an index into an array of PM domains specified in
1477 * the genpd_onecell_data struct when registering the provider.
1478 */
static struct generic_pm_domain *genpd_xlate_onecell(
1480 struct of_phandle_args *genpdspec,
1481 void *data)
1482 {
1483 struct genpd_onecell_data *genpd_data = data;
1484 unsigned int idx = genpdspec->args[0];
1485
1486 if (genpdspec->args_count != 1)
1487 return ERR_PTR(-EINVAL);
1488
1489 if (idx >= genpd_data->num_domains) {
1490 pr_err("%s: invalid domain index %u\n", __func__, idx);
1491 return ERR_PTR(-EINVAL);
1492 }
1493
1494 if (!genpd_data->domains[idx])
1495 return ERR_PTR(-ENOENT);
1496
1497 return genpd_data->domains[idx];
1498 }
1499
1500 /**
1501 * genpd_add_provider() - Register a PM domain provider for a node
1502 * @np: Device node pointer associated with the PM domain provider.
1503 * @xlate: Callback for decoding PM domain from phandle arguments.
1504 * @data: Context pointer for @xlate callback.
1505 */
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
1507 void *data)
1508 {
1509 struct of_genpd_provider *cp;
1510
1511 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
1512 if (!cp)
1513 return -ENOMEM;
1514
1515 cp->node = of_node_get(np);
1516 cp->data = data;
1517 cp->xlate = xlate;
1518
1519 mutex_lock(&of_genpd_mutex);
1520 list_add(&cp->link, &of_genpd_providers);
1521 mutex_unlock(&of_genpd_mutex);
1522 pr_debug("Added domain provider from %s\n", np->full_name);
1523
1524 return 0;
1525 }
1526
1527 /**
1528 * of_genpd_add_provider_simple() - Register a simple PM domain provider
1529 * @np: Device node pointer associated with the PM domain provider.
1530 * @genpd: Pointer to PM domain associated with the PM domain provider.
1531 */
int of_genpd_add_provider_simple(struct device_node *np,
1533 struct generic_pm_domain *genpd)
1534 {
1535 int ret = -EINVAL;
1536
1537 if (!np || !genpd)
1538 return -EINVAL;
1539
1540 mutex_lock(&gpd_list_lock);
1541
1542 if (pm_genpd_present(genpd))
1543 ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
1544
1545 if (!ret) {
1546 genpd->provider = &np->fwnode;
1547 genpd->has_provider = true;
1548 }
1549
1550 mutex_unlock(&gpd_list_lock);
1551
1552 return ret;
1553 }
1554 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
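
/*
 * Illustrative only: a provider whose DT node describes exactly one domain
 * uses "#power-domain-cells = <0>" (genpd_xlate_simple() rejects specifiers
 * with arguments) and registers itself roughly as follows, where "np" is the
 * provider's device_node and "my_pd" an already-initialized domain:
 *
 *	ret = of_genpd_add_provider_simple(np, &my_pd);
 *	if (ret)
 *		pr_err("failed to register %s as a PM domain provider\n",
 *		       my_pd.name);
 *
 * Registration fails unless the domain has been initialized first (see the
 * pm_genpd_present() check above).
 */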
1555
1556 /**
1557 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
1558 * @np: Device node pointer associated with the PM domain provider.
1559 * @data: Pointer to the data associated with the PM domain provider.
1560 */
int of_genpd_add_provider_onecell(struct device_node *np,
1562 struct genpd_onecell_data *data)
1563 {
1564 unsigned int i;
1565 int ret = -EINVAL;
1566
1567 if (!np || !data)
1568 return -EINVAL;
1569
1570 mutex_lock(&gpd_list_lock);
1571
1572 for (i = 0; i < data->num_domains; i++) {
1573 if (!data->domains[i])
1574 continue;
1575 if (!pm_genpd_present(data->domains[i]))
1576 goto error;
1577
1578 data->domains[i]->provider = &np->fwnode;
1579 data->domains[i]->has_provider = true;
1580 }
1581
1582 ret = genpd_add_provider(np, genpd_xlate_onecell, data);
1583 if (ret < 0)
1584 goto error;
1585
1586 mutex_unlock(&gpd_list_lock);
1587
1588 return 0;
1589
1590 error:
1591 while (i--) {
1592 if (!data->domains[i])
1593 continue;
1594 data->domains[i]->provider = NULL;
1595 data->domains[i]->has_provider = false;
1596 }
1597
1598 mutex_unlock(&gpd_list_lock);
1599
1600 return ret;
1601 }
1602 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
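
/*
 * Illustrative only: a controller that exposes several domains from a single
 * DT node ("#power-domain-cells = <1>") describes them in a genpd_onecell_data
 * table whose indices match the cell values used by consumers:
 *
 *	static struct generic_pm_domain *my_domains[] = {
 *		[0] = &pd_core,		// assumed domains, already
 *		[1] = &pd_gpu,		// registered with pm_genpd_init()
 *	};
 *
 *	static struct genpd_onecell_data my_onecell_data = {
 *		.domains = my_domains,
 *		.num_domains = ARRAY_SIZE(my_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &my_onecell_data);
 *
 * A consumer node using "power-domains = <&my_provider 1>" is then mapped to
 * pd_gpu by genpd_xlate_onecell().
 */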
1603
1604 /**
1605 * of_genpd_del_provider() - Remove a previously registered PM domain provider
1606 * @np: Device node pointer associated with the PM domain provider
1607 */
void of_genpd_del_provider(struct device_node *np)
1609 {
1610 struct of_genpd_provider *cp, *tmp;
1611 struct generic_pm_domain *gpd;
1612
1613 mutex_lock(&gpd_list_lock);
1614 mutex_lock(&of_genpd_mutex);
1615 list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
1616 if (cp->node == np) {
1617 /*
1618 * For each PM domain associated with the
1619 * provider, set the 'has_provider' to false
1620 * so that the PM domain can be safely removed.
1621 */
1622 list_for_each_entry(gpd, &gpd_list, gpd_list_node)
1623 if (gpd->provider == &np->fwnode)
1624 gpd->has_provider = false;
1625
1626 list_del(&cp->link);
1627 of_node_put(cp->node);
1628 kfree(cp);
1629 break;
1630 }
1631 }
1632 mutex_unlock(&of_genpd_mutex);
1633 mutex_unlock(&gpd_list_lock);
1634 }
1635 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
1636
1637 /**
1638 * genpd_get_from_provider() - Look-up PM domain
1639 * @genpdspec: OF phandle args to use for look-up
1640 *
1641 * Looks for a PM domain provider under the node specified by @genpdspec and if
1642 * found, uses xlate function of the provider to map phandle args to a PM
1643 * domain.
1644 *
1645 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
1646 * on failure.
1647 */
static struct generic_pm_domain *genpd_get_from_provider(
1649 struct of_phandle_args *genpdspec)
1650 {
1651 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
1652 struct of_genpd_provider *provider;
1653
1654 if (!genpdspec)
1655 return ERR_PTR(-EINVAL);
1656
1657 mutex_lock(&of_genpd_mutex);
1658
1659 /* Check if we have such a provider in our array */
1660 list_for_each_entry(provider, &of_genpd_providers, link) {
1661 if (provider->node == genpdspec->np)
1662 genpd = provider->xlate(genpdspec, provider->data);
1663 if (!IS_ERR(genpd))
1664 break;
1665 }
1666
1667 mutex_unlock(&of_genpd_mutex);
1668
1669 return genpd;
1670 }
1671
1672 /**
1673 * of_genpd_add_device() - Add a device to an I/O PM domain
1674 * @genpdspec: OF phandle args to use for look-up PM domain
1675 * @dev: Device to be added.
1676 *
1677 * Looks-up an I/O PM domain based upon phandle args provided and adds
1678 * the device to the PM domain. Returns a negative error code on failure.
1679 */
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
1681 {
1682 struct generic_pm_domain *genpd;
1683 int ret;
1684
1685 mutex_lock(&gpd_list_lock);
1686
1687 genpd = genpd_get_from_provider(genpdspec);
1688 if (IS_ERR(genpd)) {
1689 ret = PTR_ERR(genpd);
1690 goto out;
1691 }
1692
1693 ret = genpd_add_device(genpd, dev, NULL);
1694
1695 out:
1696 mutex_unlock(&gpd_list_lock);
1697
1698 return ret;
1699 }
1700 EXPORT_SYMBOL_GPL(of_genpd_add_device);
1701
1702 /**
1703 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1704 * @parent_spec: OF phandle args to use for parent PM domain look-up
1705 * @subdomain_spec: OF phandle args to use for subdomain look-up
1706 *
1707 * Looks-up a parent PM domain and subdomain based upon phandle args
1708 * provided and adds the subdomain to the parent PM domain. Returns a
1709 * negative error code on failure.
1710 */
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
1712 struct of_phandle_args *subdomain_spec)
1713 {
1714 struct generic_pm_domain *parent, *subdomain;
1715 int ret;
1716
1717 mutex_lock(&gpd_list_lock);
1718
1719 parent = genpd_get_from_provider(parent_spec);
1720 if (IS_ERR(parent)) {
1721 ret = PTR_ERR(parent);
1722 goto out;
1723 }
1724
1725 subdomain = genpd_get_from_provider(subdomain_spec);
1726 if (IS_ERR(subdomain)) {
1727 ret = PTR_ERR(subdomain);
1728 goto out;
1729 }
1730
1731 ret = genpd_add_subdomain(parent, subdomain);
1732
1733 out:
1734 mutex_unlock(&gpd_list_lock);
1735
1736 return ret;
1737 }
1738 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
1739
/**
 * of_genpd_remove_last - Remove the last PM domain registered for a provider
 * @np: Pointer to the device node associated with the provider.
 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * identified by the device node that is passed. The PM domain will only
 * be removed if the provider associated with the domain has been removed.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
 */
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
1754 {
1755 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
1756 int ret;
1757
1758 if (IS_ERR_OR_NULL(np))
1759 return ERR_PTR(-EINVAL);
1760
1761 mutex_lock(&gpd_list_lock);
1762 list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
1763 if (gpd->provider == &np->fwnode) {
1764 ret = genpd_remove(gpd);
1765 genpd = ret ? ERR_PTR(ret) : gpd;
1766 break;
1767 }
1768 }
1769 mutex_unlock(&gpd_list_lock);
1770
1771 return genpd;
1772 }
1773 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
1774
1775 /**
1776 * genpd_dev_pm_detach - Detach a device from its PM domain.
1777 * @dev: Device to detach.
1778 * @power_off: Currently not used
1779 *
1780 * Try to locate a corresponding generic PM domain, which the device was
1781 * attached to previously. If such is found, the device is detached from it.
1782 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
1784 {
1785 struct generic_pm_domain *pd;
1786 unsigned int i;
1787 int ret = 0;
1788
1789 pd = dev_to_genpd(dev);
1790 if (IS_ERR(pd))
1791 return;
1792
1793 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
1794
1795 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
1796 ret = genpd_remove_device(pd, dev);
1797 if (ret != -EAGAIN)
1798 break;
1799
1800 mdelay(i);
1801 cond_resched();
1802 }
1803
1804 if (ret < 0) {
1805 dev_err(dev, "failed to remove from PM domain %s: %d",
1806 pd->name, ret);
1807 return;
1808 }
1809
1810 /* Check if PM domain can be powered off after removing this device. */
1811 genpd_queue_power_off_work(pd);
1812 }
1813
static void genpd_dev_pm_sync(struct device *dev)
1815 {
1816 struct generic_pm_domain *pd;
1817
1818 pd = dev_to_genpd(dev);
1819 if (IS_ERR(pd))
1820 return;
1821
1822 genpd_queue_power_off_work(pd);
1823 }
1824
1825 /**
1826 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
1827 * @dev: Device to attach.
1828 *
1829 * Parse device's OF node to find a PM domain specifier. If such is found,
1830 * attaches the device to retrieved pm_domain ops.
1831 *
1832 * Both generic and legacy Samsung-specific DT bindings are supported to keep
1833 * backwards compatibility with existing DTBs.
1834 *
 * Returns 0 when a PM domain has been successfully attached, or a negative
 * error code otherwise. Note that if a power domain exists for the device but
 * cannot be found or powered on, -EPROBE_DEFER is returned so that the device
 * is not probed yet and the attach is retried later.
1839 */
int genpd_dev_pm_attach(struct device *dev)
1841 {
1842 struct of_phandle_args pd_args;
1843 struct generic_pm_domain *pd;
1844 unsigned int i;
1845 int ret;
1846
1847 if (!dev->of_node)
1848 return -ENODEV;
1849
1850 if (dev->pm_domain)
1851 return -EEXIST;
1852
1853 ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
1854 "#power-domain-cells", 0, &pd_args);
1855 if (ret < 0) {
1856 if (ret != -ENOENT)
1857 return ret;
1858
1859 /*
1860 * Try legacy Samsung-specific bindings
1861 * (for backwards compatibility of DT ABI)
1862 */
1863 pd_args.args_count = 0;
1864 pd_args.np = of_parse_phandle(dev->of_node,
1865 "samsung,power-domain", 0);
1866 if (!pd_args.np)
1867 return -ENOENT;
1868 }
1869
1870 mutex_lock(&gpd_list_lock);
1871 pd = genpd_get_from_provider(&pd_args);
1872 of_node_put(pd_args.np);
1873 if (IS_ERR(pd)) {
1874 mutex_unlock(&gpd_list_lock);
1875 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
1876 __func__, PTR_ERR(pd));
1877 return -EPROBE_DEFER;
1878 }
1879
1880 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
1881
1882 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
1883 ret = genpd_add_device(pd, dev, NULL);
1884 if (ret != -EAGAIN)
1885 break;
1886
1887 mdelay(i);
1888 cond_resched();
1889 }
1890 mutex_unlock(&gpd_list_lock);
1891
1892 if (ret < 0) {
1893 dev_err(dev, "failed to add to PM domain %s: %d",
1894 pd->name, ret);
1895 goto out;
1896 }
1897
1898 dev->pm_domain->detach = genpd_dev_pm_detach;
1899 dev->pm_domain->sync = genpd_dev_pm_sync;
1900
1901 mutex_lock(&pd->lock);
1902 ret = genpd_poweron(pd, 0);
1903 mutex_unlock(&pd->lock);
1904 out:
1905 return ret ? -EPROBE_DEFER : 0;
1906 }
1907 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
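
/*
 * Illustrative only: a consumer references its domain in DT, e.g.
 *
 *	my_device@10000000 {
 *		compatible = "vendor,my-device";	// assumed binding
 *		power-domains = <&my_pd_provider>;
 *	};
 *
 * and bus code attaches it before probing, roughly:
 *
 *	ret = genpd_dev_pm_attach(&pdev->dev);
 *	if (ret == -EPROBE_DEFER)
 *		return ret;	// domain not available yet, retry probe later
 *
 * On success the device's pm_domain, including the ->detach and ->sync hooks
 * set above, is in place and the domain has been powered on.
 */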
1908 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
1909
1910
1911 /*** debugfs support ***/
1912
1913 #ifdef CONFIG_DEBUG_FS
1914 #include <linux/pm.h>
1915 #include <linux/device.h>
1916 #include <linux/debugfs.h>
1917 #include <linux/seq_file.h>
1918 #include <linux/init.h>
1919 #include <linux/kobject.h>
1920 static struct dentry *pm_genpd_debugfs_dir;
1921
1922 /*
1923 * TODO: This function is a slightly modified version of rtpm_status_show
1924 * from sysfs.c, so generalize it.
1925 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
1927 {
1928 static const char * const status_lookup[] = {
1929 [RPM_ACTIVE] = "active",
1930 [RPM_RESUMING] = "resuming",
1931 [RPM_SUSPENDED] = "suspended",
1932 [RPM_SUSPENDING] = "suspending"
1933 };
1934 const char *p = "";
1935
1936 if (dev->power.runtime_error)
1937 p = "error";
1938 else if (dev->power.disable_depth)
1939 p = "unsupported";
1940 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
1941 p = status_lookup[dev->power.runtime_status];
1942 else
1943 WARN_ON(1);
1944
1945 seq_puts(s, p);
1946 }
1947
static int pm_genpd_summary_one(struct seq_file *s,
1949 struct generic_pm_domain *genpd)
1950 {
1951 static const char * const status_lookup[] = {
1952 [GPD_STATE_ACTIVE] = "on",
1953 [GPD_STATE_POWER_OFF] = "off"
1954 };
1955 struct pm_domain_data *pm_data;
1956 const char *kobj_path;
1957 struct gpd_link *link;
1958 char state[16];
1959 int ret;
1960
1961 ret = mutex_lock_interruptible(&genpd->lock);
1962 if (ret)
1963 return -ERESTARTSYS;
1964
1965 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
1966 goto exit;
1967 if (genpd->status == GPD_STATE_POWER_OFF)
1968 snprintf(state, sizeof(state), "%s-%u",
1969 status_lookup[genpd->status], genpd->state_idx);
1970 else
1971 snprintf(state, sizeof(state), "%s",
1972 status_lookup[genpd->status]);
1973 seq_printf(s, "%-30s %-15s ", genpd->name, state);
1974
1975 /*
1976 * Modifications on the list require holding locks on both
1977 * master and slave, so we are safe.
1978 * Also genpd->name is immutable.
1979 */
1980 list_for_each_entry(link, &genpd->master_links, master_node) {
1981 seq_printf(s, "%s", link->slave->name);
1982 if (!list_is_last(&link->master_node, &genpd->master_links))
1983 seq_puts(s, ", ");
1984 }
1985
1986 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
1987 kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
1988 if (kobj_path == NULL)
1989 continue;
1990
seq_printf(s, "\n    %-50s ", kobj_path);
1992 rtpm_status_str(s, pm_data->dev);
1993 kfree(kobj_path);
1994 }
1995
1996 seq_puts(s, "\n");
1997 exit:
1998 mutex_unlock(&genpd->lock);
1999
2000 return 0;
2001 }
2002
static int pm_genpd_summary_show(struct seq_file *s, void *data)
2004 {
2005 struct generic_pm_domain *genpd;
2006 int ret = 0;
2007
seq_puts(s, "domain                          status          slaves\n");
seq_puts(s, "    /device                                             runtime status\n");
2010 seq_puts(s, "----------------------------------------------------------------------\n");
2011
2012 ret = mutex_lock_interruptible(&gpd_list_lock);
2013 if (ret)
2014 return -ERESTARTSYS;
2015
2016 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2017 ret = pm_genpd_summary_one(s, genpd);
2018 if (ret)
2019 break;
2020 }
2021 mutex_unlock(&gpd_list_lock);
2022
2023 return ret;
2024 }
2025
static int pm_genpd_summary_open(struct inode *inode, struct file *file)
2027 {
2028 return single_open(file, pm_genpd_summary_show, NULL);
2029 }
2030
2031 static const struct file_operations pm_genpd_summary_fops = {
2032 .open = pm_genpd_summary_open,
2033 .read = seq_read,
2034 .llseek = seq_lseek,
2035 .release = single_release,
2036 };
2037
static int __init pm_genpd_debug_init(void)
2039 {
2040 struct dentry *d;
2041
2042 pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2043
2044 if (!pm_genpd_debugfs_dir)
2045 return -ENOMEM;
2046
2047 d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
2048 pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
2049 if (!d)
2050 return -ENOMEM;
2051
2052 return 0;
2053 }
2054 late_initcall(pm_genpd_debug_init);
2055
static void __exit pm_genpd_debug_exit(void)
2057 {
2058 debugfs_remove_recursive(pm_genpd_debugfs_dir);
2059 }
2060 __exitcall(pm_genpd_debug_exit);
2061 #endif /* CONFIG_DEBUG_FS */
2062