/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
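
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * GENPD_DEV_CALLBACK() resolves a per-domain device callback and invokes it
 * only if it is set, treating a missing callback as success.  For example,
 * the genpd_stop_dev() wrapper below expands to roughly:
 *
 *	int (*__routine)(struct device *__d);
 *	int __ret = 0;
 *
 *	__routine = genpd->dev_ops.stop;
 *	if (__routine)
 *		__ret = __routine(dev);
 *	return __ret;
 */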

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_on_latency_ns)
		return ret;

	genpd->power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}

static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret == -EBUSY)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_off_latency_ns)
		return ret;

	genpd->power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_poweroff() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

static int genpd_poweron(struct generic_pm_domain *genpd);

/**
 * __genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __genpd_poweron(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);

		ret = genpd_poweron(link->master);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	ret = genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return ret;
}

/**
 * genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
static int genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
}

static int genpd_restore_dev(struct generic_pm_domain *genpd,
			struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @is_async: PM domain is powered down from a scheduled work
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe)
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && is_async))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_poweron() for the master yet after
		 * incrementing it.  In that case genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return 0;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	mutex_lock(&genpd->lock);
	genpd_poweroff(genpd, true);
	mutex_unlock(&genpd->lock);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (runtime_pm && stop_ok && !stop_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (runtime_pm)
		time_start = ktime_get();

	ret = genpd_save_dev(genpd, dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		genpd_restore_dev(genpd, dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd_poweroff(genpd, false);
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe) {
		timed = false;
		goto out;
	}

	mutex_lock(&genpd->lock);
	ret = __genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	if (timed && runtime_pm)
		time_start = ktime_get();

	genpd_start_dev(genpd, dev);
	genpd_restore_dev(genpd, dev);

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_poweroff_unused);
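
/*
 * Usage note (editor's note, not part of the original file): because of the
 * __setup() hook above, booting with "pd_ignore_unused" on the kernel
 * command line skips this late_initcall cleanup and leaves otherwise unused
 * PM domains powered on, which can help when debugging a platform whose
 * domains were brought up by the bootloader:
 *
 *	... root=/dev/mmcblk0p2 pd_ignore_unused
 */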

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @timed: True if latency measurements are allowed.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
				   bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	genpd_power_off(genpd, timed);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master, timed);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @timed: True if latency measurements are allowed.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
				  bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_ACTIVE)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master, timed);
		genpd_sd_counter_inc(link->master);
	}

	genpd_power_on(genpd, timed);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	mutex_unlock(&genpd->lock);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put(dev);
	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd, true);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd, true);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_sync_poweron(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			genpd_power_off(genpd, true);

			return 0;
		}
	}

	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd, true);

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_request_idle(dev);
	}
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether to power the domain off (true) or back on (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd, false);
	} else {
		pm_genpd_sync_poweron(genpd, false);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
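
/*
 * Illustrative sketch (editor's note, not part of the original file): a
 * timekeeping device that lives in a power domain would typically bracket
 * its syscore suspend/resume with these helpers so the domain state follows
 * the device.  The names below (my_timer_*) are hypothetical:
 *
 *	static void my_timer_suspend(struct clock_event_device *ced)
 *	{
 *		my_timer_save_context();
 *		pm_genpd_syscore_poweroff(my_timer_dev);
 *	}
 *
 *	static void my_timer_resume(struct clock_event_device *ced)
 *	{
 *		pm_genpd_syscore_poweron(my_timer_dev);
 *		my_timer_restore_context();
 *	}
 */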

#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_late		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_early		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_late		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_early		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct generic_pm_domain *genpd,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	dev->pm_domain = &genpd->domain;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	mutex_unlock(&genpd->lock);

	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}
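
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * platform code can register a device with a domain before the driver
 * probes.  The names my_pd and my_platform_device are hypothetical, and
 * pm_genpd_add_device() is the pm_domain.h convenience wrapper that is
 * expected to call __pm_genpd_add_device() with td == NULL, i.e. default
 * QoS timing data:
 *
 *	static struct generic_pm_domain my_pd;
 *
 *	static int __init my_board_pm_init(void)
 *	{
 *		int ret;
 *
 *		ret = pm_genpd_add_device(&my_pd, &my_platform_device.dev);
 *		if (ret)
 *			pr_err("my_board: add_device failed: %d\n", ret);
 *		return ret;
 *	}
 *
 * Note that -EAGAIN is returned while a system suspend transition is in
 * progress (prepared_count > 0); callers such as genpd_dev_pm_attach()
 * below retry with a back-off for that reason.
 */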

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (!genpd || genpd != pm_genpd_lookup_dev(dev))
		return -EINVAL;

	/* The above validation also means we have existing domain_data. */
	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	dev->pm_domain = NULL;

	list_del_init(&pdd->list_node);

	mutex_unlock(&genpd->lock);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	mutex_unlock(&genpd->lock);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	mutex_lock(&genpd->lock);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	if (genpd->status == GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->master_links, master_node) {
		if (itr->slave == subdomain && itr->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	mutex_unlock(&genpd->lock);
	if (ret)
		kfree(link);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
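
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * building a two-level domain hierarchy.  The names soc_pd and gpu_pd are
 * hypothetical; once linked, the subdomain keeps its master referenced
 * (via sd_count) whenever the subdomain itself is powered on:
 *
 *	static struct generic_pm_domain soc_pd;
 *	static struct generic_pm_domain gpu_pd;
 *
 *	pm_genpd_init(&soc_pd, NULL, false);
 *	pm_genpd_init(&gpu_pd, NULL, true);
 *	if (pm_genpd_add_subdomain(&soc_pd, &gpu_pd))
 *		pr_err("failed to link gpu_pd under soc_pd\n");
 *
 * Linking an on subdomain under an off master is rejected with -EINVAL,
 * as is adding the same link twice.
 */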

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	mutex_lock(&genpd->lock);

	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
			subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

out:
	mutex_unlock(&genpd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

/* Default device callbacks for generic PM domains. */

/**
 * pm_genpd_default_save_state - Default "save device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_save_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_default_restore_state - Default PM domains "restore device state".
 * @dev: Device to handle.
 */
static int pm_genpd_default_restore_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial state of the domain (true if it starts powered off).
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
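
/*
 * Illustrative sketch (editor's note, not part of the original file): a
 * minimal SoC power domain.  The callback bodies and my_soc_write_pwr_reg()
 * are hypothetical; only pm_genpd_init(), the power_on/power_off hooks and
 * GENPD_FLAG_PM_CLK come from this file:
 *
 *	static int my_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		return my_soc_write_pwr_reg(domain->name, true);
 *	}
 *
 *	static int my_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		return my_soc_write_pwr_reg(domain->name, false);
 *	}
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my-pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *		.flags = GENPD_FLAG_PM_CLK,
 *	};
 *
 *	pm_genpd_init(&my_pd, NULL, true);
 *
 * Passing is_off == true registers the domain in the powered-off state;
 * GENPD_FLAG_PM_CLK wires the per-device stop/start hooks to pm_clk.
 */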

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
struct generic_pm_domain *__of_genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	if (genpdspec->args_count != 0)
		return ERR_PTR(-EINVAL);
	return data;
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);

/**
 * __of_genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
struct generic_pm_domain *__of_genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);

/**
 * __of_genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %s\n", np->full_name);

	return 0;
}
EXPORT_SYMBOL_GPL(__of_genpd_add_provider);
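
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * registering a provider from a platform driver's probe.  This assumes the
 * of_genpd_add_provider_simple() helper from pm_domain.h, expected to be a
 * thin wrapper around __of_genpd_add_provider() with __of_genpd_xlate_simple()
 * as the xlate callback; pdev and my_pd are hypothetical:
 *
 *	pm_genpd_init(&my_pd, NULL, true);
 *	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
 *
 * With the provider's DT node carrying "#power-domain-cells = <0>" (the
 * simple xlate rejects any extra specifier cells), consumer nodes can then
 * reference it with "power-domains = <&my_pd_node>;".
 */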

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp, *tmp;

	mutex_lock(&of_genpd_mutex);
	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * of_genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and if
 * found, uses xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
struct generic_pm_domain *of_genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = pm_genpd_lookup_dev(dev);
	if (!pd)
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = pm_genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d\n",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to retrieved pm_domain ops.
 *
 * Both generic and legacy Samsung-specific DT bindings are supported to keep
 * backwards compatibility with existing DTBs.
 *
 * Returns 0 when the PM domain is successfully attached, or a negative error
 * code otherwise. Note that if a power-domain exists for the device, but it
 * cannot be found or turned on, then -EPROBE_DEFER is returned to ensure that
 * the device is not probed and to re-try again later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret;

	if (!dev->of_node)
		return -ENODEV;

	if (dev->pm_domain)
		return -EEXIST;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					"#power-domain-cells", 0, &pd_args);
	if (ret < 0) {
		if (ret != -ENOENT)
			return ret;

		/*
		 * Try legacy Samsung-specific bindings
		 * (for backwards compatibility of DT ABI)
		 */
		pd_args.args_count = 0;
		pd_args.np = of_parse_phandle(dev->of_node,
						"samsung,power-domain", 0);
		if (!pd_args.np)
			return -ENOENT;
	}

	pd = of_genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return -EPROBE_DEFER;
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = pm_genpd_add_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to add to PM domain %s: %d\n",
			pd->name, ret);
		goto out;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;
	ret = genpd_poweron(pd);

out:
	return ret ? -EPROBE_DEFER : 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
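
/*
 * Illustrative sketch (editor's note, not part of the original file): bus
 * code is expected to call genpd_dev_pm_attach() before probing a device,
 * typically via the dev_pm_domain_attach() wrapper, honouring -EPROBE_DEFER:
 *
 *	ret = genpd_dev_pm_attach(dev);
 *	if (ret == -EPROBE_DEFER)
 *		return ret;
 *
 * Errors other than -EPROBE_DEFER (e.g. -ENODEV for a device without an OF
 * node, or -ENOENT when no "power-domains" property exists) are typically
 * treated as "no PM domain to attach" and probing continues normally.
 */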
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/***        debugfs support        ***/

#ifdef CONFIG_PM_ADVANCED_DEBUG
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *pm_genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_puts(s, p);
}

static int pm_genpd_summary_one(struct seq_file *s,
				struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	int ret;

	ret = mutex_lock_interruptible(&genpd->lock);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	seq_printf(s, "%-30s  %-15s ", genpd->name, status_lookup[genpd->status]);

	/*
	 * Modifications on the list require holding locks on both
	 * master and slave, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		seq_printf(s, "%s", link->slave->name);
		if (!list_is_last(&link->master_node, &genpd->master_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	mutex_unlock(&genpd->lock);

	return 0;
}

static int pm_genpd_summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          slaves\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = pm_genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

static int pm_genpd_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, pm_genpd_summary_show, NULL);
}

static const struct file_operations pm_genpd_summary_fops = {
	.open = pm_genpd_summary_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init pm_genpd_debug_init(void)
{
	struct dentry *d;

	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	if (!pm_genpd_debugfs_dir)
		return -ENOMEM;

	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
			pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
	if (!d)
		return -ENOMEM;

	return 0;
}
late_initcall(pm_genpd_debug_init);

static void __exit pm_genpd_debug_exit(void)
{
	debugfs_remove_recursive(pm_genpd_debugfs_dir);
}
__exitcall(pm_genpd_debug_exit);
#endif /* CONFIG_PM_ADVANCED_DEBUG */