// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
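
/*
 * Illustrative note (not part of the build): for example,
 * GENPD_DEV_CALLBACK(genpd, int, stop, dev) expands to a statement
 * expression roughly equivalent to:
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = 0;
 *
 *	if (__routine)
 *		__ret = __routine(dev);
 *
 * with __ret being the value of the whole expression, i.e. 0 when no
 * callback is set.
 */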

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	return mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)
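
/*
 * Sketch of the locking pattern used throughout this file: the wrappers
 * above dispatch to either the mutex or the spinlock implementation,
 * depending on whether the domain was initialized as IRQ safe.
 *
 *	genpd_lock(genpd);
 *	... read or modify genpd state ...
 *	genpd_unlock(genpd);
 */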

#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)

static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a no sleep domain,
	 * to indicate a suboptimal configuration for PM. For an always on
	 * domain this isn't the case, thus don't warn.
	 */
	if (ret && !genpd_is_always_on(genpd))
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	struct dentry *d;

	if (!genpd_debugfs_dir)
		return;

	d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
	debugfs_remove(d);
}

static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	ktime_t delta, now;

	now = ktime_get();
	delta = ktime_sub(now, genpd->accounting_time);

	/*
	 * If genpd->status is active, we have just come out of the off
	 * state, so account the elapsed time as idle time; otherwise
	 * account it as on time.
	 */
	if (genpd->status == GENPD_STATE_ON) {
		int state_idx = genpd->state_idx;

		genpd->states[state_idx].idle_time =
			ktime_add(genpd->states[state_idx].idle_time, delta);
	} else {
		genpd->on_time = ktime_add(genpd->on_time, delta);
	}

	genpd->accounting_time = now;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* The new requested state is the same as the max requested state */
	if (state == genpd->performance_state)
		return state;

	/* The new requested state is higher than the max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to parent domain) is different from
	 * link->child->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take votes from powered-off sub-domains into
	 * account, as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *parent;
	struct gpd_link *link;
	int parent_state, ret;

	if (state == genpd->performance_state)
		return 0;

	/* Propagate to parents of genpd */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		parent = link->parent;

		if (!parent->set_performance_state)
			continue;

		/* Find parent's performance state */
		ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
							 parent->opp_table,
							 state);
		if (unlikely(ret < 0))
			goto err;

		parent_state = ret;

		genpd_lock_nested(parent, depth + 1);

		link->prev_performance_state = link->performance_state;
		link->performance_state = parent_state;
		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(parent);

		if (ret)
			goto err;
	}

	ret = genpd->set_performance_state(genpd, state);
	if (ret)
		goto err;

	genpd->performance_state = state;
	return 0;

err:
	/* Encountered an error, let's roll back */
	list_for_each_entry_continue_reverse(link, &genpd->child_links,
					     child_node) {
		parent = link->parent;

		if (!parent->set_performance_state)
			continue;

		genpd_lock_nested(parent, depth + 1);

		parent_state = link->prev_performance_state;
		link->performance_state = parent_state;

		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       parent->name, parent_state);
		}

		genpd_unlock(parent);
	}

	return ret;
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance state needs to be set.
 * @state: Target performance state of the device. This can be set to 0 when
 *	   the device doesn't have any performance state constraints left
 *	   (and so the device no longer participates in determining the target
 *	   performance state of the genpd).
 *
 * It is assumed that the caller guarantees that the genpd won't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	unsigned int prev;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (unlikely(!genpd->set_performance_state))
		return -EINVAL;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	genpd_lock(genpd);

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	prev = gpd_data->performance_state;
	gpd_data->performance_state = state;

	state = _genpd_reeval_performance_state(genpd, state);
	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev;

	genpd_unlock(genpd);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
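
/*
 * Minimal consumer-side usage sketch (illustrative, not compiled here):
 * assumes @dev is already attached to a genpd that implements
 * ->set_performance_state(), and the value 3 is a made-up performance
 * state that would normally be derived from an OPP table:
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, 3);
 *	if (ret)
 *		dev_err(dev, "failed to set perf state: %d\n", ret);
 *	...
 *	dev_pm_genpd_set_performance_state(dev, 0);
 *
 * The final call with 0 drops the device's vote again.
 */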

/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform the PM framework of the next wakeup. It is assumed
 * that the callers guarantee that the genpd won't be detached while this
 * routine is getting called. Additionally, it's also assumed that @dev isn't
 * runtime suspended (RPM_SUSPENDED).
 * Although devices are expected to update the next_wakeup after the end of
 * their use case as well, it is possible the devices themselves may not know
 * about that, so a stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain_data *gpd_data;
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	gpd_data->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
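
/*
 * Usage sketch (illustrative): a driver that knows its next interrupt is
 * roughly 500 us away can hint the governor; the horizon is a made-up value
 * and @dev must be attached and not runtime suspended, as noted above:
 *
 *	ktime_t next = ktime_add_us(ktime_get(), 500);
 *
 *	dev_pm_genpd_set_next_wakeup(dev, next);
 */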

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power on. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_ON,
					     GENPD_NOTIFY_OFF, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_on)
		goto out;

	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power off. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_OFF,
					     GENPD_NOTIFY_ON, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_off)
		goto out;

	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;
	int ret;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) ||
			genpd_is_rpm_always_on(genpd) ||
			atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		/*
		 * Do not allow the PM domain to be powered off, when an IRQ
		 * safe device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	ret = _genpd_power_off(genpd, true);
	if (ret) {
		genpd->states[genpd->state_idx].rejected++;
		return ret;
	}

	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->child_links,
					child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	time_start = 0;
	if (runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non-IRQ safe domain that holds an IRQ safe
	 * device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
		timed = false;
		goto out;
	}

	genpd_lock(genpd);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	time_start = 0;
	if (timed && runtime_pm)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) ||
		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
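
/*
 * With "pd_ignore_unused" on the kernel command line,
 * genpd_power_off_unused() below becomes a no-op, so power domains with no
 * devices in use are left on. This is mainly useful for debugging power
 * domain issues.
 */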

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall_sync(genpd_power_off_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its parents.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GENPD_STATE_OFF;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_off(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_inc(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_on(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}

	_genpd_power_on(genpd, false);
	genpd->status = GENPD_STATE_ON;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev,
			  const struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_is_active_wakeup(genpd);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O pm domain.
 * @dev: Device to suspend.
 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (poweroff)
		ret = pm_generic_poweroff_noirq(dev);
	else
		ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			if (poweroff)
				pm_generic_restore_noirq(dev);
			else
				pm_generic_resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, false);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return pm_generic_resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_resume_noirq(dev);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev))
		ret = genpd_stop_dev(genpd, dev);

	return ret;
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_thaw_noirq(dev);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, true);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	genpd_lock(genpd);
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into an arbitrary
		 * state, so make it appear as powered off to
		 * genpd_sync_power_on(), which then tries to power it on in
		 * case it was really off.
		 */
		genpd->status = GENPD_STATE_OFF;
	}

	genpd_sync_power_on(genpd, true, 0);
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_restore_noirq(dev);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

static void genpd_switch_state(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;
	bool use_lock;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	use_lock = genpd_is_irq_safe(genpd);

	if (use_lock)
		genpd_lock(genpd);

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, use_lock, 0);
	} else {
		genpd_sync_power_on(genpd, use_lock, 0);
		genpd->suspended_count--;
	}

	if (use_lock)
		genpd_unlock(genpd);
}

/**
 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
 * @dev: The device that is attached to the genpd, that can be suspended.
 *
 * This routine should typically be called for a device that needs to be
 * suspended during the syscore suspend phase. It may also be called during
 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
 * genpd.
 */
void dev_pm_genpd_suspend(struct device *dev)
{
	genpd_switch_state(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);

/**
 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
 * @dev: The device that is attached to the genpd, which needs to be resumed.
 *
 * This routine should typically be called for a device that needs to be resumed
 * during the syscore resume phase. It may also be called during suspend-to-idle
 * to resume a corresponding CPU device that is attached to a genpd.
 */
void dev_pm_genpd_resume(struct device *dev)
{
	genpd_switch_state(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
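
/*
 * Pairing sketch (illustrative assumptions: "my_syscore_*" and "cpu_dev"
 * are made-up names; registration via register_syscore_ops() is omitted):
 *
 *	static int my_syscore_suspend(void)
 *	{
 *		dev_pm_genpd_suspend(cpu_dev);
 *		return 0;
 *	}
 *
 *	static void my_syscore_resume(void)
 *	{
 *		dev_pm_genpd_resume(cpu_dev);
 *	}
 */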

#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
	gpd_data->next_wakeup = KTIME_MAX;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_lock_nested(parent, depth + 1);
		genpd_update_cpumask(parent, cpu, set, depth + 1);
		genpd_unlock(parent);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, true, 0);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, false, 0);
}

static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return -1;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}

	return -1;
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
 out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, dev);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);
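
/*
 * Provider-side sketch (illustrative): "my_pd" is a made-up
 * generic_pm_domain assumed to have been set up with pm_genpd_init():
 *
 *	ret = pm_genpd_add_device(&my_pd, dev);
 *	if (ret)
 *		return ret;
 *	...
 *	pm_genpd_remove_device(dev);
 */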

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

/**
 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
 *
 * @dev: Device that should be associated with the notifier
 * @nb: The notifier block to register
 *
 * Users may call this function to add a genpd power on/off notifier for an
 * attached @dev. Only one notifier per device is allowed. The notifier is
 * sent when genpd is powering on/off the PM domain.
 *
 * It is assumed that the user guarantees that the genpd won't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->power_nb)
		return -EEXIST;

	genpd_lock(genpd);
	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to add notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = nb;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
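
/*
 * Consumer sketch (illustrative; "my_genpd_nb_cb" and "save_my_context"
 * are made-up names): the callback receives the GENPD_NOTIFY_* actions
 * sent from _genpd_power_on()/_genpd_power_off() above:
 *
 *	static int my_genpd_nb_cb(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		if (action == GENPD_NOTIFY_PRE_OFF)
 *			save_my_context();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_genpd_nb = {
 *		.notifier_call = my_genpd_nb_cb,
 *	};
 *
 *	ret = dev_pm_genpd_add_notifier(dev, &my_genpd_nb);
 */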

/**
 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
 *
 * @dev: Device that is associated with the notifier
 *
 * Users may call this function to remove a genpd power on/off notifier for an
 * attached @dev.
 *
 * It is assumed that the user guarantees that the genpd won't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_remove_notifier(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (!gpd_data->power_nb)
		return -ENODEV;

	genpd_lock(genpd);
	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
					    gpd_data->power_nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
1798 
1799 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1800 			       struct generic_pm_domain *subdomain)
1801 {
1802 	struct gpd_link *link, *itr;
1803 	int ret = 0;
1804 
1805 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1806 	    || genpd == subdomain)
1807 		return -EINVAL;
1808 
1809 	/*
1810 	 * If the subdomain can be powered on/off in an IRQ safe
1811 	 * context, ensure that the parent domain can also be
1812 	 * powered on/off in that context.
1813 	 */
1814 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1815 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1816 				genpd->name, subdomain->name);
1817 		return -EINVAL;
1818 	}
1819 
1820 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1821 	if (!link)
1822 		return -ENOMEM;
1823 
1824 	genpd_lock(subdomain);
1825 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1826 
1827 	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1828 		ret = -EINVAL;
1829 		goto out;
1830 	}
1831 
1832 	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
1833 		if (itr->child == subdomain && itr->parent == genpd) {
1834 			ret = -EINVAL;
1835 			goto out;
1836 		}
1837 	}
1838 
1839 	link->parent = genpd;
1840 	list_add_tail(&link->parent_node, &genpd->parent_links);
1841 	link->child = subdomain;
1842 	list_add_tail(&link->child_node, &subdomain->child_links);
1843 	if (genpd_status_on(subdomain))
1844 		genpd_sd_counter_inc(genpd);
1845 
1846  out:
1847 	genpd_unlock(genpd);
1848 	genpd_unlock(subdomain);
1849 	if (ret)
1850 		kfree(link);
1851 	return ret;
1852 }
1853 
1854 /**
1855  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1856  * @genpd: Leader PM domain to add the subdomain to.
1857  * @subdomain: Subdomain to be added.
1858  */
1859 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1860 			   struct generic_pm_domain *subdomain)
1861 {
1862 	int ret;
1863 
1864 	mutex_lock(&gpd_list_lock);
1865 	ret = genpd_add_subdomain(genpd, subdomain);
1866 	mutex_unlock(&gpd_list_lock);
1867 
1868 	return ret;
1869 }
1870 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
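
/*
 * Usage sketch (illustrative only): building a two-level hierarchy, so that
 * the parent can only be powered off once the child is off. The domain names
 * are hypothetical.
 *
 *	pm_genpd_init(&foo_parent_pd, NULL, true);
 *	pm_genpd_init(&foo_child_pd, NULL, true);
 *	ret = pm_genpd_add_subdomain(&foo_parent_pd, &foo_child_pd);
 */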
1871 
1872 /**
1873  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1874  * @genpd: Leader PM domain to remove the subdomain from.
1875  * @subdomain: Subdomain to be removed.
1876  */
1877 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1878 			      struct generic_pm_domain *subdomain)
1879 {
1880 	struct gpd_link *l, *link;
1881 	int ret = -EINVAL;
1882 
1883 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1884 		return -EINVAL;
1885 
1886 	genpd_lock(subdomain);
1887 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1888 
1889 	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
1890 		pr_warn("%s: unable to remove subdomain %s\n",
1891 			genpd->name, subdomain->name);
1892 		ret = -EBUSY;
1893 		goto out;
1894 	}
1895 
1896 	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
1897 		if (link->child != subdomain)
1898 			continue;
1899 
1900 		list_del(&link->parent_node);
1901 		list_del(&link->child_node);
1902 		kfree(link);
1903 		if (genpd_status_on(subdomain))
1904 			genpd_sd_counter_dec(genpd);
1905 
1906 		ret = 0;
1907 		break;
1908 	}
1909 
1910 out:
1911 	genpd_unlock(genpd);
1912 	genpd_unlock(subdomain);
1913 
1914 	return ret;
1915 }
1916 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1917 
1918 static void genpd_free_default_power_state(struct genpd_power_state *states,
1919 					   unsigned int state_count)
1920 {
1921 	kfree(states);
1922 }
1923 
1924 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1925 {
1926 	struct genpd_power_state *state;
1927 
1928 	state = kzalloc(sizeof(*state), GFP_KERNEL);
1929 	if (!state)
1930 		return -ENOMEM;
1931 
1932 	genpd->states = state;
1933 	genpd->state_count = 1;
1934 	genpd->free_states = genpd_free_default_power_state;
1935 
1936 	return 0;
1937 }
1938 
1939 static void genpd_lock_init(struct generic_pm_domain *genpd)
1940 {
1941 	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
1942 		spin_lock_init(&genpd->slock);
1943 		genpd->lock_ops = &genpd_spin_ops;
1944 	} else {
1945 		mutex_init(&genpd->mlock);
1946 		genpd->lock_ops = &genpd_mtx_ops;
1947 	}
1948 }
1949 
1950 /**
1951  * pm_genpd_init - Initialize a generic I/O PM domain object.
1952  * @genpd: PM domain object to initialize.
1953  * @gov: PM domain governor to associate with the domain (may be NULL).
1954  * @is_off: Initial power state of the domain (true if it starts powered off).
1955  *
1956  * Returns 0 on successful initialization, else a negative error code.
1957  */
1958 int pm_genpd_init(struct generic_pm_domain *genpd,
1959 		  struct dev_power_governor *gov, bool is_off)
1960 {
1961 	int ret;
1962 
1963 	if (IS_ERR_OR_NULL(genpd))
1964 		return -EINVAL;
1965 
1966 	INIT_LIST_HEAD(&genpd->parent_links);
1967 	INIT_LIST_HEAD(&genpd->child_links);
1968 	INIT_LIST_HEAD(&genpd->dev_list);
1969 	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
1970 	genpd_lock_init(genpd);
1971 	genpd->gov = gov;
1972 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1973 	atomic_set(&genpd->sd_count, 0);
1974 	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
1975 	genpd->device_count = 0;
1976 	genpd->max_off_time_ns = -1;
1977 	genpd->max_off_time_changed = true;
1978 	genpd->next_wakeup = KTIME_MAX;
1979 	genpd->provider = NULL;
1980 	genpd->has_provider = false;
1981 	genpd->accounting_time = ktime_get();
1982 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
1983 	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
1984 	genpd->domain.ops.prepare = genpd_prepare;
1985 	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
1986 	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
1987 	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
1988 	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
1989 	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
1990 	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
1991 	genpd->domain.ops.complete = genpd_complete;
1992 	genpd->domain.start = genpd_dev_pm_start;
1993 
1994 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
1995 		genpd->dev_ops.stop = pm_clk_suspend;
1996 		genpd->dev_ops.start = pm_clk_resume;
1997 	}
1998 
1999 	/* Always-on domains must be powered on at initialization. */
2000 	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2001 			!genpd_status_on(genpd))
2002 		return -EINVAL;
2003 
2004 	if (genpd_is_cpu_domain(genpd) &&
2005 	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
2006 		return -ENOMEM;
2007 
2008 	/* Use only one "off" state if there were no states declared */
2009 	if (genpd->state_count == 0) {
2010 		ret = genpd_set_default_power_state(genpd);
2011 		if (ret) {
2012 			if (genpd_is_cpu_domain(genpd))
2013 				free_cpumask_var(genpd->cpus);
2014 			return ret;
2015 		}
2016 	} else if (!gov && genpd->state_count > 1) {
2017 		pr_warn("%s: no governor for states\n", genpd->name);
2018 	}
2019 
2020 	device_initialize(&genpd->dev);
2021 	dev_set_name(&genpd->dev, "%s", genpd->name);
2022 
2023 	mutex_lock(&gpd_list_lock);
2024 	list_add(&genpd->gpd_list_node, &gpd_list);
2025 	genpd_debug_add(genpd);
2026 	mutex_unlock(&gpd_list_lock);
2027 
2028 	return 0;
2029 }
2030 EXPORT_SYMBOL_GPL(pm_genpd_init);
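
/*
 * Usage sketch (illustrative only): a minimal domain with power_on/power_off
 * callbacks, registered as initially powered off with no governor. All "foo"
 * symbols are hypothetical.
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *pd)
 *	{
 *		return foo_regmap_enable();	// flip the power switch
 *	}
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *pd)
 *	{
 *		return foo_regmap_disable();
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo-pd",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, true);	// NULL gov, starts off
 */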
2031 
2032 static int genpd_remove(struct generic_pm_domain *genpd)
2033 {
2034 	struct gpd_link *l, *link;
2035 
2036 	if (IS_ERR_OR_NULL(genpd))
2037 		return -EINVAL;
2038 
2039 	genpd_lock(genpd);
2040 
2041 	if (genpd->has_provider) {
2042 		genpd_unlock(genpd);
2043 		pr_err("Provider present, unable to remove %s\n", genpd->name);
2044 		return -EBUSY;
2045 	}
2046 
2047 	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2048 		genpd_unlock(genpd);
2049 		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
2050 		return -EBUSY;
2051 	}
2052 
2053 	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2054 		list_del(&link->parent_node);
2055 		list_del(&link->child_node);
2056 		kfree(link);
2057 	}
2058 
2059 	list_del(&genpd->gpd_list_node);
2060 	genpd_unlock(genpd);
2061 	genpd_debug_remove(genpd);
2062 	cancel_work_sync(&genpd->power_off_work);
2063 	if (genpd_is_cpu_domain(genpd))
2064 		free_cpumask_var(genpd->cpus);
2065 	if (genpd->free_states)
2066 		genpd->free_states(genpd->states, genpd->state_count);
2067 
2068 	pr_debug("%s: removed %s\n", __func__, genpd->name);
2069 
2070 	return 0;
2071 }
2072 
2073 /**
2074  * pm_genpd_remove - Remove a generic I/O PM domain
2075  * @genpd: Pointer to PM domain that is to be removed.
2076  *
2077  * To remove the PM domain, this function:
2078  *  - Removes the PM domain as a subdomain to any parent domains,
2079  *    if it was added.
2080  *  - Removes the PM domain from the list of registered PM domains.
2081  *
2082  * The PM domain will only be removed if the associated provider has
2083  * been removed, it is not a parent to any other PM domain, and it has
2084  * no devices associated with it.
2085  */
2086 int pm_genpd_remove(struct generic_pm_domain *genpd)
2087 {
2088 	int ret;
2089 
2090 	mutex_lock(&gpd_list_lock);
2091 	ret = genpd_remove(genpd);
2092 	mutex_unlock(&gpd_list_lock);
2093 
2094 	return ret;
2095 }
2096 EXPORT_SYMBOL_GPL(pm_genpd_remove);
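
/*
 * Teardown sketch (illustrative only): genpd_remove() above refuses to drop a
 * domain that still has a provider, children or devices, so providers are
 * expected to unwind in the opposite order of setup:
 *
 *	of_genpd_del_provider(np);	// clears has_provider
 *	pm_genpd_remove(&foo_pd);	// now succeeds if no children/devices remain
 */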
2097 
2098 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2099 
2100 /*
2101  * Device Tree based PM domain providers.
2102  *
2103  * The code below implements generic device tree based PM domain providers that
2104  * bind device tree nodes with generic PM domains registered in the system.
2105  *
2106  * Any driver that registers generic PM domains and needs to support binding of
2107  * devices to these domains is supposed to register a PM domain provider, which
2108  * maps a PM domain specifier retrieved from the device tree to a PM domain.
2109  *
2110  * Two simple mapping functions have been provided for convenience:
2111  *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2112  *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2113  *    index.
2114  */
2115 
2116 /**
2117  * struct of_genpd_provider - PM domain provider registration structure
2118  * @link: Entry in global list of PM domain providers
2119  * @node: Pointer to device tree node of PM domain provider
2120  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2121  *         into a PM domain.
2122  * @data: context pointer to be passed into @xlate callback
2123  */
2124 struct of_genpd_provider {
2125 	struct list_head link;
2126 	struct device_node *node;
2127 	genpd_xlate_t xlate;
2128 	void *data;
2129 };
2130 
2131 /* List of registered PM domain providers. */
2132 static LIST_HEAD(of_genpd_providers);
2133 /* Mutex to protect the list above. */
2134 static DEFINE_MUTEX(of_genpd_mutex);
2135 
2136 /**
2137  * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2138  * @genpdspec: OF phandle args to map into a PM domain
2139  * @data: xlate function private data - pointer to struct generic_pm_domain
2140  *
2141  * This is a generic xlate function that can be used to model PM domains that
2142  * have their own device tree nodes. The private data of the xlate function
2143  * needs to be a valid pointer to struct generic_pm_domain.
2144  */
2145 static struct generic_pm_domain *genpd_xlate_simple(
2146 					struct of_phandle_args *genpdspec,
2147 					void *data)
2148 {
2149 	return data;
2150 }
2151 
2152 /**
2153  * genpd_xlate_onecell() - Xlate function using a single index.
2154  * @genpdspec: OF phandle args to map into a PM domain
2155  * @data: xlate function private data - pointer to struct genpd_onecell_data
2156  *
2157  * This is a generic xlate function that can be used to model simple PM domain
2158  * controllers that have one device tree node and provide multiple PM domains.
2159  * A single cell is used as an index into an array of PM domains specified in
2160  * the genpd_onecell_data struct when registering the provider.
2161  */
2162 static struct generic_pm_domain *genpd_xlate_onecell(
2163 					struct of_phandle_args *genpdspec,
2164 					void *data)
2165 {
2166 	struct genpd_onecell_data *genpd_data = data;
2167 	unsigned int idx = genpdspec->args[0];
2168 
2169 	if (genpdspec->args_count != 1)
2170 		return ERR_PTR(-EINVAL);
2171 
2172 	if (idx >= genpd_data->num_domains) {
2173 		pr_err("%s: invalid domain index %u\n", __func__, idx);
2174 		return ERR_PTR(-EINVAL);
2175 	}
2176 
2177 	if (!genpd_data->domains[idx])
2178 		return ERR_PTR(-ENOENT);
2179 
2180 	return genpd_data->domains[idx];
2181 }
2182 
2183 /**
2184  * genpd_add_provider() - Register a PM domain provider for a node
2185  * @np: Device node pointer associated with the PM domain provider.
2186  * @xlate: Callback for decoding PM domain from phandle arguments.
2187  * @data: Context pointer for @xlate callback.
2188  */
2189 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2190 			      void *data)
2191 {
2192 	struct of_genpd_provider *cp;
2193 
2194 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2195 	if (!cp)
2196 		return -ENOMEM;
2197 
2198 	cp->node = of_node_get(np);
2199 	cp->data = data;
2200 	cp->xlate = xlate;
2201 	fwnode_dev_initialized(&np->fwnode, true);
2202 
2203 	mutex_lock(&of_genpd_mutex);
2204 	list_add(&cp->link, &of_genpd_providers);
2205 	mutex_unlock(&of_genpd_mutex);
2206 	pr_debug("Added domain provider from %pOF\n", np);
2207 
2208 	return 0;
2209 }
2210 
2211 static bool genpd_present(const struct generic_pm_domain *genpd)
2212 {
2213 	const struct generic_pm_domain *gpd;
2214 
2215 	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
2216 		if (gpd == genpd)
2217 			return true;
2218 	return false;
2219 }
2220 
2221 /**
2222  * of_genpd_add_provider_simple() - Register a simple PM domain provider
2223  * @np: Device node pointer associated with the PM domain provider.
2224  * @genpd: Pointer to PM domain associated with the PM domain provider.
2225  */
2226 int of_genpd_add_provider_simple(struct device_node *np,
2227 				 struct generic_pm_domain *genpd)
2228 {
2229 	int ret = -EINVAL;
2230 
2231 	if (!np || !genpd)
2232 		return -EINVAL;
2233 
2234 	mutex_lock(&gpd_list_lock);
2235 
2236 	if (!genpd_present(genpd))
2237 		goto unlock;
2238 
2239 	genpd->dev.of_node = np;
2240 
2241 	/* Parse genpd OPP table */
2242 	if (genpd->set_performance_state) {
2243 		ret = dev_pm_opp_of_add_table(&genpd->dev);
2244 		if (ret) {
2245 			if (ret != -EPROBE_DEFER)
2246 				dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
2247 					ret);
2248 			goto unlock;
2249 		}
2250 
2251 		/*
2252 		 * Save table for faster processing while setting performance
2253 		 * state.
2254 		 */
2255 		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2256 		WARN_ON(IS_ERR(genpd->opp_table));
2257 	}
2258 
2259 	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2260 	if (ret) {
2261 		if (genpd->set_performance_state) {
2262 			dev_pm_opp_put_opp_table(genpd->opp_table);
2263 			dev_pm_opp_of_remove_table(&genpd->dev);
2264 		}
2265 
2266 		goto unlock;
2267 	}
2268 
2269 	genpd->provider = &np->fwnode;
2270 	genpd->has_provider = true;
2271 
2272 unlock:
2273 	mutex_unlock(&gpd_list_lock);
2274 
2275 	return ret;
2276 }
2277 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
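
/*
 * Usage sketch (illustrative only): a 1:1 provider, where the provider's DT
 * node itself represents the single PM domain. The DT fragment and "foo"
 * names are hypothetical.
 *
 *	// DT:	power: power-controller@12340000 {
 *	//		compatible = "foo,power-controller";
 *	//		#power-domain-cells = <0>;
 *	//	};
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, true);
 *	if (!ret)
 *		ret = of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
 */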
2278 
2279 /**
2280  * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2281  * @np: Device node pointer associated with the PM domain provider.
2282  * @data: Pointer to the data associated with the PM domain provider.
2283  */
2284 int of_genpd_add_provider_onecell(struct device_node *np,
2285 				  struct genpd_onecell_data *data)
2286 {
2287 	struct generic_pm_domain *genpd;
2288 	unsigned int i;
2289 	int ret = -EINVAL;
2290 
2291 	if (!np || !data)
2292 		return -EINVAL;
2293 
2294 	mutex_lock(&gpd_list_lock);
2295 
2296 	if (!data->xlate)
2297 		data->xlate = genpd_xlate_onecell;
2298 
2299 	for (i = 0; i < data->num_domains; i++) {
2300 		genpd = data->domains[i];
2301 
2302 		if (!genpd)
2303 			continue;
2304 		if (!genpd_present(genpd))
2305 			goto error;
2306 
2307 		genpd->dev.of_node = np;
2308 
2309 		/* Parse genpd OPP table */
2310 		if (genpd->set_performance_state) {
2311 			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2312 			if (ret) {
2313 				if (ret != -EPROBE_DEFER)
2314 					dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
2315 						i, ret);
2316 				goto error;
2317 			}
2318 
2319 			/*
2320 			 * Save table for faster processing while setting
2321 			 * performance state.
2322 			 */
2323 			genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
2324 			WARN_ON(IS_ERR(genpd->opp_table));
2325 		}
2326 
2327 		genpd->provider = &np->fwnode;
2328 		genpd->has_provider = true;
2329 	}
2330 
2331 	ret = genpd_add_provider(np, data->xlate, data);
2332 	if (ret < 0)
2333 		goto error;
2334 
2335 	mutex_unlock(&gpd_list_lock);
2336 
2337 	return 0;
2338 
2339 error:
2340 	while (i--) {
2341 		genpd = data->domains[i];
2342 
2343 		if (!genpd)
2344 			continue;
2345 
2346 		genpd->provider = NULL;
2347 		genpd->has_provider = false;
2348 
2349 		if (genpd->set_performance_state) {
2350 			dev_pm_opp_put_opp_table(genpd->opp_table);
2351 			dev_pm_opp_of_remove_table(&genpd->dev);
2352 		}
2353 	}
2354 
2355 	mutex_unlock(&gpd_list_lock);
2356 
2357 	return ret;
2358 }
2359 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
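
/*
 * Usage sketch (illustrative only): one provider node exporting several
 * domains, selected by a single cell used as an index (see
 * genpd_xlate_onecell() above). All names are hypothetical.
 *
 *	static struct generic_pm_domain *foo_pds[] = { &foo_pd_a, &foo_pd_b };
 *	static struct genpd_onecell_data foo_pd_data = {
 *		.domains = foo_pds,
 *		.num_domains = ARRAY_SIZE(foo_pds),
 *	};
 *
 *	// DT consumers then use: power-domains = <&power_ctrl 1>;
 *	ret = of_genpd_add_provider_onecell(np, &foo_pd_data);
 */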
2360 
2361 /**
2362  * of_genpd_del_provider() - Remove a previously registered PM domain provider
2363  * @np: Device node pointer associated with the PM domain provider
2364  */
2365 void of_genpd_del_provider(struct device_node *np)
2366 {
2367 	struct of_genpd_provider *cp, *tmp;
2368 	struct generic_pm_domain *gpd;
2369 
2370 	mutex_lock(&gpd_list_lock);
2371 	mutex_lock(&of_genpd_mutex);
2372 	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2373 		if (cp->node == np) {
2374 			/*
2375 			 * For each PM domain associated with the
2376 			 * provider, set the 'has_provider' to false
2377 			 * so that the PM domain can be safely removed.
2378 			 */
2379 			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2380 				if (gpd->provider == &np->fwnode) {
2381 					gpd->has_provider = false;
2382 
2383 					if (!gpd->set_performance_state)
2384 						continue;
2385 
2386 					dev_pm_opp_put_opp_table(gpd->opp_table);
2387 					dev_pm_opp_of_remove_table(&gpd->dev);
2388 				}
2389 			}
2390 
2391 			fwnode_dev_initialized(&cp->node->fwnode, false);
2392 			list_del(&cp->link);
2393 			of_node_put(cp->node);
2394 			kfree(cp);
2395 			break;
2396 		}
2397 	}
2398 	mutex_unlock(&of_genpd_mutex);
2399 	mutex_unlock(&gpd_list_lock);
2400 }
2401 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2402 
2403 /**
2404  * genpd_get_from_provider() - Look-up PM domain
2405  * @genpdspec: OF phandle args to use for look-up
2406  *
2407  * Looks for a PM domain provider under the node specified by @genpdspec and if
2408  * found, uses the provider's xlate function to map the phandle args to a PM
2409  * domain.
2410  *
2411  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2412  * on failure.
2413  */
2414 static struct generic_pm_domain *genpd_get_from_provider(
2415 					struct of_phandle_args *genpdspec)
2416 {
2417 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2418 	struct of_genpd_provider *provider;
2419 
2420 	if (!genpdspec)
2421 		return ERR_PTR(-EINVAL);
2422 
2423 	mutex_lock(&of_genpd_mutex);
2424 
2425 	/* Check if we have such a provider in our array */
2426 	list_for_each_entry(provider, &of_genpd_providers, link) {
2427 		if (provider->node == genpdspec->np)
2428 			genpd = provider->xlate(genpdspec, provider->data);
2429 		if (!IS_ERR(genpd))
2430 			break;
2431 	}
2432 
2433 	mutex_unlock(&of_genpd_mutex);
2434 
2435 	return genpd;
2436 }
2437 
2438 /**
2439  * of_genpd_add_device() - Add a device to an I/O PM domain
2440  * @genpdspec: OF phandle args to use for look-up PM domain
2441  * @dev: Device to be added.
2442  *
2443  * Looks up an I/O PM domain based upon the phandle args provided and adds
2444  * the device to the PM domain. Returns a negative error code on failure.
2445  */
2446 int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2447 {
2448 	struct generic_pm_domain *genpd;
2449 	int ret;
2450 
2451 	mutex_lock(&gpd_list_lock);
2452 
2453 	genpd = genpd_get_from_provider(genpdspec);
2454 	if (IS_ERR(genpd)) {
2455 		ret = PTR_ERR(genpd);
2456 		goto out;
2457 	}
2458 
2459 	ret = genpd_add_device(genpd, dev, dev);
2460 
2461 out:
2462 	mutex_unlock(&gpd_list_lock);
2463 
2464 	return ret;
2465 }
2466 EXPORT_SYMBOL_GPL(of_genpd_add_device);
2467 
2468 /**
2469  * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2470  * @parent_spec: OF phandle args to use for parent PM domain look-up
2471  * @subdomain_spec: OF phandle args to use for subdomain look-up
2472  *
2473  * Looks up a parent PM domain and subdomain based upon the phandle args
2474  * provided and adds the subdomain to the parent PM domain. Returns a
2475  * negative error code on failure.
2476  */
2477 int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2478 			   struct of_phandle_args *subdomain_spec)
2479 {
2480 	struct generic_pm_domain *parent, *subdomain;
2481 	int ret;
2482 
2483 	mutex_lock(&gpd_list_lock);
2484 
2485 	parent = genpd_get_from_provider(parent_spec);
2486 	if (IS_ERR(parent)) {
2487 		ret = PTR_ERR(parent);
2488 		goto out;
2489 	}
2490 
2491 	subdomain = genpd_get_from_provider(subdomain_spec);
2492 	if (IS_ERR(subdomain)) {
2493 		ret = PTR_ERR(subdomain);
2494 		goto out;
2495 	}
2496 
2497 	ret = genpd_add_subdomain(parent, subdomain);
2498 
2499 out:
2500 	mutex_unlock(&gpd_list_lock);
2501 
2502 	return ret;
2503 }
2504 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
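
/*
 * Usage sketch (illustrative only): the phandle args are typically obtained
 * by parsing "power-domains" specifiers; the node names below are
 * hypothetical.
 *
 *	struct of_phandle_args parent_spec, child_spec;
 *
 *	of_parse_phandle_with_args(np, "power-domains", "#power-domain-cells",
 *				   0, &parent_spec);
 *	of_parse_phandle_with_args(child_np, "power-domains",
 *				   "#power-domain-cells", 0, &child_spec);
 *	ret = of_genpd_add_subdomain(&parent_spec, &child_spec);
 *	of_node_put(parent_spec.np);
 *	of_node_put(child_spec.np);
 */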
2505 
2506 /**
2507  * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2508  * @parent_spec: OF phandle args to use for parent PM domain look-up
2509  * @subdomain_spec: OF phandle args to use for subdomain look-up
2510  *
2511  * Looks up a parent PM domain and subdomain based upon the phandle args
2512  * provided and removes the subdomain from the parent PM domain. Returns a
2513  * negative error code on failure.
2514  */
2515 int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
2516 			      struct of_phandle_args *subdomain_spec)
2517 {
2518 	struct generic_pm_domain *parent, *subdomain;
2519 	int ret;
2520 
2521 	mutex_lock(&gpd_list_lock);
2522 
2523 	parent = genpd_get_from_provider(parent_spec);
2524 	if (IS_ERR(parent)) {
2525 		ret = PTR_ERR(parent);
2526 		goto out;
2527 	}
2528 
2529 	subdomain = genpd_get_from_provider(subdomain_spec);
2530 	if (IS_ERR(subdomain)) {
2531 		ret = PTR_ERR(subdomain);
2532 		goto out;
2533 	}
2534 
2535 	ret = pm_genpd_remove_subdomain(parent, subdomain);
2536 
2537 out:
2538 	mutex_unlock(&gpd_list_lock);
2539 
2540 	return ret;
2541 }
2542 EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2543 
2544 /**
2545  * of_genpd_remove_last - Remove the last PM domain registered for a provider
2546  * @np: Pointer to the device node associated with the provider
2547  *
2548  * Find the last PM domain that was added by a particular provider and
2549  * remove this PM domain from the list of PM domains. The provider is
2550  * identified by the 'np' device node that is passed. The PM
2551  * domain will only be removed if the provider associated with the domain
2552  * has been removed.
2553  *
2554  * Returns a valid pointer to struct generic_pm_domain on success or
2555  * ERR_PTR() on failure.
2556  */
2557 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2558 {
2559 	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2560 	int ret;
2561 
2562 	if (IS_ERR_OR_NULL(np))
2563 		return ERR_PTR(-EINVAL);
2564 
2565 	mutex_lock(&gpd_list_lock);
2566 	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2567 		if (gpd->provider == &np->fwnode) {
2568 			ret = genpd_remove(gpd);
2569 			genpd = ret ? ERR_PTR(ret) : gpd;
2570 			break;
2571 		}
2572 	}
2573 	mutex_unlock(&gpd_list_lock);
2574 
2575 	return genpd;
2576 }
2577 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2578 
2579 static void genpd_release_dev(struct device *dev)
2580 {
2581 	of_node_put(dev->of_node);
2582 	kfree(dev);
2583 }
2584 
2585 static struct bus_type genpd_bus_type = {
2586 	.name		= "genpd",
2587 };
2588 
2589 /**
2590  * genpd_dev_pm_detach - Detach a device from its PM domain.
2591  * @dev: Device to detach.
2592  * @power_off: Currently not used
2593  *
2594  * Try to locate a corresponding generic PM domain, which the device was
2595  * attached to previously. If one is found, the device is detached from it.
2596  */
2597 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2598 {
2599 	struct generic_pm_domain *pd;
2600 	unsigned int i;
2601 	int ret = 0;
2602 
2603 	pd = dev_to_genpd(dev);
2604 	if (IS_ERR(pd))
2605 		return;
2606 
2607 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2608 
2609 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2610 		ret = genpd_remove_device(pd, dev);
2611 		if (ret != -EAGAIN)
2612 			break;
2613 
2614 		mdelay(i);
2615 		cond_resched();
2616 	}
2617 
2618 	if (ret < 0) {
2619 		dev_err(dev, "failed to remove from PM domain %s: %d\n",
2620 			pd->name, ret);
2621 		return;
2622 	}
2623 
2624 	/* Check if PM domain can be powered off after removing this device. */
2625 	genpd_queue_power_off_work(pd);
2626 
2627 	/* Unregister the device if it was created by genpd. */
2628 	if (dev->bus == &genpd_bus_type)
2629 		device_unregister(dev);
2630 }
2631 
2632 static void genpd_dev_pm_sync(struct device *dev)
2633 {
2634 	struct generic_pm_domain *pd;
2635 
2636 	pd = dev_to_genpd(dev);
2637 	if (IS_ERR(pd))
2638 		return;
2639 
2640 	genpd_queue_power_off_work(pd);
2641 }
2642 
2643 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2644 				 unsigned int index, bool power_on)
2645 {
2646 	struct of_phandle_args pd_args;
2647 	struct generic_pm_domain *pd;
2648 	int ret;
2649 
2650 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2651 				"#power-domain-cells", index, &pd_args);
2652 	if (ret < 0)
2653 		return ret;
2654 
2655 	mutex_lock(&gpd_list_lock);
2656 	pd = genpd_get_from_provider(&pd_args);
2657 	of_node_put(pd_args.np);
2658 	if (IS_ERR(pd)) {
2659 		mutex_unlock(&gpd_list_lock);
2660 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2661 			__func__, PTR_ERR(pd));
2662 		return driver_deferred_probe_check_state(base_dev);
2663 	}
2664 
2665 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2666 
2667 	ret = genpd_add_device(pd, dev, base_dev);
2668 	mutex_unlock(&gpd_list_lock);
2669 
2670 	if (ret < 0) {
2671 		if (ret != -EPROBE_DEFER)
2672 			dev_err(dev, "failed to add to PM domain %s: %d\n",
2673 				pd->name, ret);
2674 		return ret;
2675 	}
2676 
2677 	dev->pm_domain->detach = genpd_dev_pm_detach;
2678 	dev->pm_domain->sync = genpd_dev_pm_sync;
2679 
2680 	if (power_on) {
2681 		genpd_lock(pd);
2682 		ret = genpd_power_on(pd, 0);
2683 		genpd_unlock(pd);
2684 	}
2685 
2686 	if (ret)
2687 		genpd_remove_device(pd, dev);
2688 
2689 	return ret ? -EPROBE_DEFER : 1;
2690 }
2691 
2692 /**
2693  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2694  * @dev: Device to attach.
2695  *
2696  * Parse device's OF node to find a PM domain specifier. If such is found,
2697  * attaches the device to the retrieved pm_domain ops.
2698  *
2699  * Returns 1 when a PM domain was successfully attached, 0 when the device
2700  * doesn't need a PM domain or when multiple power-domains exist for it, else
2701  * a negative error code. Note that if a power-domain exists for the device,
2702  * but it cannot be found or turned on, -EPROBE_DEFER is returned to ensure
2703  * that the device is not probed and to retry later.
2704  */
2705 int genpd_dev_pm_attach(struct device *dev)
2706 {
2707 	if (!dev->of_node)
2708 		return 0;
2709 
2710 	/*
2711 	 * Devices with multiple PM domains must be attached separately, as we
2712 	 * can only attach one PM domain per device.
2713 	 */
2714 	if (of_count_phandle_with_args(dev->of_node, "power-domains",
2715 				       "#power-domain-cells") != 1)
2716 		return 0;
2717 
2718 	return __genpd_dev_pm_attach(dev, dev, 0, true);
2719 }
2720 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
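
/*
 * Note: drivers do not normally call genpd_dev_pm_attach() directly; bus code
 * invokes it at probe time via dev_pm_domain_attach(). A simplified sketch of
 * the usual call path:
 *
 *	// platform bus probe path
 *	ret = dev_pm_domain_attach(&pdev->dev, true);	// ends up here for DT genpds
 */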
2721 
2722 /**
2723  * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2724  * @dev: The device used to lookup the PM domain.
2725  * @index: The index of the PM domain.
2726  *
2727  * Parse device's OF node to find a PM domain specifier at the provided @index.
2728  * If such is found, creates a virtual device and attaches it to the retrieved
2729  * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
2730  * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
2731  *
2732  * Returns the created virtual device if a PM domain was successfully attached,
2733  * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
2734  * failures. If a power-domain exists for the device, but cannot be found or
2735  * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
2736  * is not probed and to retry later.
2737  */
2738 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2739 					 unsigned int index)
2740 {
2741 	struct device *virt_dev;
2742 	int num_domains;
2743 	int ret;
2744 
2745 	if (!dev->of_node)
2746 		return NULL;
2747 
2748 	/* Verify that the index is within a valid range. */
2749 	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2750 						 "#power-domain-cells");
2751 	if (index >= num_domains)
2752 		return NULL;
2753 
2754 	/* Allocate and register device on the genpd bus. */
2755 	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2756 	if (!virt_dev)
2757 		return ERR_PTR(-ENOMEM);
2758 
2759 	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2760 	virt_dev->bus = &genpd_bus_type;
2761 	virt_dev->release = genpd_release_dev;
2762 	virt_dev->of_node = of_node_get(dev->of_node);
2763 
2764 	ret = device_register(virt_dev);
2765 	if (ret) {
2766 		put_device(virt_dev);
2767 		return ERR_PTR(ret);
2768 	}
2769 
2770 	/* Try to attach the device to the PM domain at the specified index. */
2771 	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
2772 	if (ret < 1) {
2773 		device_unregister(virt_dev);
2774 		return ret ? ERR_PTR(ret) : NULL;
2775 	}
2776 
2777 	pm_runtime_enable(virt_dev);
2778 	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2779 
2780 	return virt_dev;
2781 }
2782 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
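
/*
 * Usage sketch (illustrative only): a consumer with several power domains
 * attaches each through a virtual device and wires it up with a device link,
 * so runtime PM of the consumer propagates to the domain:
 *
 *	struct device *pd_dev;
 *	struct device_link *link;
 *
 *	pd_dev = genpd_dev_pm_attach_by_id(dev, 1);
 *	if (IS_ERR_OR_NULL(pd_dev))
 *		return pd_dev ? PTR_ERR(pd_dev) : -ENODEV;
 *	link = device_link_add(dev, pd_dev,
 *			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
 */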
2783 
2784 /**
2785  * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2786  * @dev: The device used to lookup the PM domain.
2787  * @name: The name of the PM domain.
2788  *
2789  * Parse device's OF node to find a PM domain specifier using the
2790  * power-domain-names DT property. For further description see
2791  * genpd_dev_pm_attach_by_id().
2792  */
2793 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2794 {
2795 	int index;
2796 
2797 	if (!dev->of_node)
2798 		return NULL;
2799 
2800 	index = of_property_match_string(dev->of_node, "power-domain-names",
2801 					 name);
2802 	if (index < 0)
2803 		return NULL;
2804 
2805 	return genpd_dev_pm_attach_by_id(dev, index);
2806 }
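
/*
 * Usage sketch (illustrative only): with a DT fragment such as the
 * hypothetical one below, a domain can be attached by name instead of index:
 *
 *	// DT:	power-domains = <&foo_pd_ctrl 0>, <&foo_pd_ctrl 1>;
 *	//	power-domain-names = "bus", "core";
 *
 *	struct device *pd_dev = genpd_dev_pm_attach_by_name(dev, "core");
 */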
2807 
2808 static const struct of_device_id idle_state_match[] = {
2809 	{ .compatible = "domain-idle-state", },
2810 	{ }
2811 };
2812 
2813 static int genpd_parse_state(struct genpd_power_state *genpd_state,
2814 				    struct device_node *state_node)
2815 {
2816 	int err;
2817 	u32 residency;
2818 	u32 entry_latency, exit_latency;
2819 
2820 	err = of_property_read_u32(state_node, "entry-latency-us",
2821 						&entry_latency);
2822 	if (err) {
2823 		pr_debug(" * %pOF missing entry-latency-us property\n",
2824 			 state_node);
2825 		return -EINVAL;
2826 	}
2827 
2828 	err = of_property_read_u32(state_node, "exit-latency-us",
2829 						&exit_latency);
2830 	if (err) {
2831 		pr_debug(" * %pOF missing exit-latency-us property\n",
2832 			 state_node);
2833 		return -EINVAL;
2834 	}
2835 
2836 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2837 	if (!err)
2838 		genpd_state->residency_ns = 1000LL * residency;
2839 
2840 	genpd_state->power_on_latency_ns = 1000LL * exit_latency;
2841 	genpd_state->power_off_latency_ns = 1000LL * entry_latency;
2842 	genpd_state->fwnode = &state_node->fwnode;
2843 
2844 	return 0;
2845 }
2846 
2847 static int genpd_iterate_idle_states(struct device_node *dn,
2848 				     struct genpd_power_state *states)
2849 {
2850 	int ret;
2851 	struct of_phandle_iterator it;
2852 	struct device_node *np;
2853 	int i = 0;
2854 
2855 	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2856 	if (ret <= 0)
2857 		return ret == -ENOENT ? 0 : ret;
2858 
2859 	/* Loop over the phandles until all the requested entries are found */
2860 	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2861 		np = it.node;
2862 		if (!of_match_node(idle_state_match, np))
2863 			continue;
2864 
2865 		if (!of_device_is_available(np))
2866 			continue;
2867 
2868 		if (states) {
2869 			ret = genpd_parse_state(&states[i], np);
2870 			if (ret) {
2871 				pr_err("Parsing idle state node %pOF failed with err %d\n",
2872 				       np, ret);
2873 				of_node_put(np);
2874 				return ret;
2875 			}
2876 		}
2877 		i++;
2878 	}
2879 
2880 	return i;
2881 }
2882 
2883 /**
2884  * of_genpd_parse_idle_states: Return array of idle states for the genpd.
2885  *
2886  * @dn: The genpd device node
2887  * @states: The pointer to which the state array will be saved.
2888  * @n: The count of elements in the array returned from this function.
2889  *
2890  * Returns the device states parsed from the OF node. The memory for the states
2891  * is allocated by this function and it is the responsibility of the caller to
2892  * free the memory after use. Whether any or zero compatible domain idle states
2893  * are found, it returns 0; in case of errors, a negative error code is returned.
2894  */
2895 int of_genpd_parse_idle_states(struct device_node *dn,
2896 			struct genpd_power_state **states, int *n)
2897 {
2898 	struct genpd_power_state *st;
2899 	int ret;
2900 
2901 	ret = genpd_iterate_idle_states(dn, NULL);
2902 	if (ret < 0)
2903 		return ret;
2904 
2905 	if (!ret) {
2906 		*states = NULL;
2907 		*n = 0;
2908 		return 0;
2909 	}
2910 
2911 	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
2912 	if (!st)
2913 		return -ENOMEM;
2914 
2915 	ret = genpd_iterate_idle_states(dn, st);
2916 	if (ret <= 0) {
2917 		kfree(st);
2918 		return ret < 0 ? ret : -EINVAL;
2919 	}
2920 
2921 	*states = st;
2922 	*n = ret;
2923 
2924 	return 0;
2925 }
2926 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
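
/*
 * Usage sketch (illustrative only): a provider parses its idle states from DT
 * and hands them to the domain before pm_genpd_init(). The DT fragment is
 * hypothetical; the properties match what genpd_parse_state() above expects.
 *
 *	// DT:	foo_pd_ret: domain-idle-state {
 *	//		compatible = "domain-idle-state";
 *	//		entry-latency-us = <20>;
 *	//		exit-latency-us = <40>;
 *	//		min-residency-us = <80>;
 *	//	};
 *	//	...
 *	//	domain-idle-states = <&foo_pd_ret>;
 *
 *	struct genpd_power_state *states;
 *	int nr_states;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (!ret && nr_states) {
 *		foo_pd.states = states;		// caller owns this memory
 *		foo_pd.state_count = nr_states;
 *	}
 */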
2927 
2928 /**
2929  * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
2930  *
2931  * @genpd_dev: Genpd's device for which the performance-state needs to be found.
2932  * @opp: struct dev_pm_opp of the OPP for which we need to find performance
2933  *	state.
2934  *
2935  * Returns the performance state encoded in the OPP of the genpd. This calls
2936  * the platform-specific genpd->opp_to_performance_state() callback to
2937  * translate a power domain OPP to a performance state.
2938  *
2939  * Returns performance state on success and 0 on failure.
2940  */
2941 unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
2942 					       struct dev_pm_opp *opp)
2943 {
2944 	struct generic_pm_domain *genpd = NULL;
2945 	int state;
2946 
2947 	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
2948 
2949 	if (unlikely(!genpd->opp_to_performance_state))
2950 		return 0;
2951 
2952 	genpd_lock(genpd);
2953 	state = genpd->opp_to_performance_state(genpd, opp);
2954 	genpd_unlock(genpd);
2955 
2956 	return state;
2957 }
2958 EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
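
/*
 * Provider sketch (illustrative only): a common implementation of the
 * ->opp_to_performance_state() callback simply returns the OPP's level,
 * assuming dev_pm_opp_get_level() is available and the DT encodes
 * performance states via opp-level properties:
 *
 *	static unsigned int foo_pd_opp_to_perf(struct generic_pm_domain *pd,
 *					       struct dev_pm_opp *opp)
 *	{
 *		return dev_pm_opp_get_level(opp);
 *	}
 */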
2959 
2960 static int __init genpd_bus_init(void)
2961 {
2962 	return bus_register(&genpd_bus_type);
2963 }
2964 core_initcall(genpd_bus_init);
2965 
2966 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2967 
2968 
2969 /***        debugfs support        ***/
2970 
2971 #ifdef CONFIG_DEBUG_FS
2972 /*
2973  * TODO: This function is a slightly modified version of rtpm_status_show
2974  * from sysfs.c, so generalize it.
2975  */
2976 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2977 {
2978 	static const char * const status_lookup[] = {
2979 		[RPM_ACTIVE] = "active",
2980 		[RPM_RESUMING] = "resuming",
2981 		[RPM_SUSPENDED] = "suspended",
2982 		[RPM_SUSPENDING] = "suspending"
2983 	};
2984 	const char *p = "";
2985 
2986 	if (dev->power.runtime_error)
2987 		p = "error";
2988 	else if (dev->power.disable_depth)
2989 		p = "unsupported";
2990 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2991 		p = status_lookup[dev->power.runtime_status];
2992 	else
2993 		WARN_ON(1);
2994 
2995 	seq_puts(s, p);
2996 }
2997 
2998 static int genpd_summary_one(struct seq_file *s,
2999 			struct generic_pm_domain *genpd)
3000 {
3001 	static const char * const status_lookup[] = {
3002 		[GENPD_STATE_ON] = "on",
3003 		[GENPD_STATE_OFF] = "off"
3004 	};
3005 	struct pm_domain_data *pm_data;
3006 	const char *kobj_path;
3007 	struct gpd_link *link;
3008 	char state[16];
3009 	int ret;
3010 
3011 	ret = genpd_lock_interruptible(genpd);
3012 	if (ret)
3013 		return -ERESTARTSYS;
3014 
3015 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
3016 		goto exit;
3017 	if (!genpd_status_on(genpd))
3018 		snprintf(state, sizeof(state), "%s-%u",
3019 			 status_lookup[genpd->status], genpd->state_idx);
3020 	else
3021 		snprintf(state, sizeof(state), "%s",
3022 			 status_lookup[genpd->status]);
3023 	seq_printf(s, "%-30s  %-15s ", genpd->name, state);
3024 
3025 	/*
3026 	 * Modifications on the list require holding locks on both
3027 	 * parent and child, so we are safe.
3028 	 * Also genpd->name is immutable.
3029 	 */
3030 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
3031 		seq_printf(s, "%s", link->child->name);
3032 		if (!list_is_last(&link->parent_node, &genpd->parent_links))
3033 			seq_puts(s, ", ");
3034 	}
3035 
3036 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3037 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3038 				genpd_is_irq_safe(genpd) ?
3039 				GFP_ATOMIC : GFP_KERNEL);
3040 		if (kobj_path == NULL)
3041 			continue;
3042 
3043 		seq_printf(s, "\n    %-50s  ", kobj_path);
3044 		rtpm_status_str(s, pm_data->dev);
3045 		kfree(kobj_path);
3046 	}
3047 
3048 	seq_puts(s, "\n");
3049 exit:
3050 	genpd_unlock(genpd);
3051 
3052 	return 0;
3053 }
3054 
3055 static int summary_show(struct seq_file *s, void *data)
3056 {
3057 	struct generic_pm_domain *genpd;
3058 	int ret = 0;
3059 
3060 	seq_puts(s, "domain                          status          children\n");
3061 	seq_puts(s, "    /device                                             runtime status\n");
3062 	seq_puts(s, "----------------------------------------------------------------------\n");
3063 
3064 	ret = mutex_lock_interruptible(&gpd_list_lock);
3065 	if (ret)
3066 		return -ERESTARTSYS;
3067 
3068 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3069 		ret = genpd_summary_one(s, genpd);
3070 		if (ret)
3071 			break;
3072 	}
3073 	mutex_unlock(&gpd_list_lock);
3074 
3075 	return ret;
3076 }
3077 
3078 static int status_show(struct seq_file *s, void *data)
3079 {
3080 	static const char * const status_lookup[] = {
3081 		[GENPD_STATE_ON] = "on",
3082 		[GENPD_STATE_OFF] = "off"
3083 	};
3084 
3085 	struct generic_pm_domain *genpd = s->private;
3086 	int ret = 0;
3087 
3088 	ret = genpd_lock_interruptible(genpd);
3089 	if (ret)
3090 		return -ERESTARTSYS;
3091 
3092 	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3093 		goto exit;
3094 
3095 	if (genpd->status == GENPD_STATE_OFF)
3096 		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3097 			genpd->state_idx);
3098 	else
3099 		seq_printf(s, "%s\n", status_lookup[genpd->status]);
3100 exit:
3101 	genpd_unlock(genpd);
3102 	return ret;
3103 }
3104 
3105 static int sub_domains_show(struct seq_file *s, void *data)
3106 {
3107 	struct generic_pm_domain *genpd = s->private;
3108 	struct gpd_link *link;
3109 	int ret = 0;
3110 
3111 	ret = genpd_lock_interruptible(genpd);
3112 	if (ret)
3113 		return -ERESTARTSYS;
3114 
3115 	list_for_each_entry(link, &genpd->parent_links, parent_node)
3116 		seq_printf(s, "%s\n", link->child->name);
3117 
3118 	genpd_unlock(genpd);
3119 	return ret;
3120 }
3121 
3122 static int idle_states_show(struct seq_file *s, void *data)
3123 {
3124 	struct generic_pm_domain *genpd = s->private;
3125 	unsigned int i;
3126 	int ret = 0;
3127 
3128 	ret = genpd_lock_interruptible(genpd);
3129 	if (ret)
3130 		return -ERESTARTSYS;
3131 
3132 	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");
3133 
3134 	for (i = 0; i < genpd->state_count; i++) {
3135 		ktime_t delta = 0;
3136 		s64 msecs;
3137 
3138 		if ((genpd->status == GENPD_STATE_OFF) &&
3139 				(genpd->state_idx == i))
3140 			delta = ktime_sub(ktime_get(), genpd->accounting_time);
3141 
3142 		msecs = ktime_to_ms(
3143 			ktime_add(genpd->states[i].idle_time, delta));
3144 		seq_printf(s, "S%-13i %-14lld %-14llu %llu\n", i, msecs,
3145 			      genpd->states[i].usage, genpd->states[i].rejected);
3146 	}
3147 
3148 	genpd_unlock(genpd);
3149 	return ret;
3150 }
3151 
3152 static int active_time_show(struct seq_file *s, void *data)
3153 {
3154 	struct generic_pm_domain *genpd = s->private;
3155 	ktime_t delta = 0;
3156 	int ret = 0;
3157 
3158 	ret = genpd_lock_interruptible(genpd);
3159 	if (ret)
3160 		return -ERESTARTSYS;
3161 
3162 	if (genpd->status == GENPD_STATE_ON)
3163 		delta = ktime_sub(ktime_get(), genpd->accounting_time);
3164 
3165 	seq_printf(s, "%lld ms\n", ktime_to_ms(
3166 				ktime_add(genpd->on_time, delta)));
3167 
3168 	genpd_unlock(genpd);
3169 	return ret;
3170 }
3171 
3172 static int total_idle_time_show(struct seq_file *s, void *data)
3173 {
3174 	struct generic_pm_domain *genpd = s->private;
3175 	ktime_t delta = 0, total = 0;
3176 	unsigned int i;
3177 	int ret = 0;
3178 
3179 	ret = genpd_lock_interruptible(genpd);
3180 	if (ret)
3181 		return -ERESTARTSYS;
3182 
3183 	for (i = 0; i < genpd->state_count; i++) {
3184 
3185 		if ((genpd->status == GENPD_STATE_OFF) &&
3186 				(genpd->state_idx == i))
3187 			delta = ktime_sub(ktime_get(), genpd->accounting_time);
3188 
3189 		total = ktime_add(total, genpd->states[i].idle_time);
3190 	}
3191 	total = ktime_add(total, delta);
3192 
3193 	seq_printf(s, "%lld ms\n", ktime_to_ms(total));
3194 
3195 	genpd_unlock(genpd);
3196 	return ret;
3197 }
3198 
3199 
3200 static int devices_show(struct seq_file *s, void *data)
3201 {
3202 	struct generic_pm_domain *genpd = s->private;
3203 	struct pm_domain_data *pm_data;
3204 	const char *kobj_path;
3205 	int ret = 0;
3206 
3207 	ret = genpd_lock_interruptible(genpd);
3208 	if (ret)
3209 		return -ERESTARTSYS;
3210 
3211 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3212 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3213 				genpd_is_irq_safe(genpd) ?
3214 				GFP_ATOMIC : GFP_KERNEL);
3215 		if (kobj_path == NULL)
3216 			continue;
3217 
3218 		seq_printf(s, "%s\n", kobj_path);
3219 		kfree(kobj_path);
3220 	}
3221 
3222 	genpd_unlock(genpd);
3223 	return ret;
3224 }
3225 
3226 static int perf_state_show(struct seq_file *s, void *data)
3227 {
3228 	struct generic_pm_domain *genpd = s->private;
3229 
3230 	if (genpd_lock_interruptible(genpd))
3231 		return -ERESTARTSYS;
3232 
3233 	seq_printf(s, "%u\n", genpd->performance_state);
3234 
3235 	genpd_unlock(genpd);
3236 	return 0;
3237 }
3238 
3239 DEFINE_SHOW_ATTRIBUTE(summary);
3240 DEFINE_SHOW_ATTRIBUTE(status);
3241 DEFINE_SHOW_ATTRIBUTE(sub_domains);
3242 DEFINE_SHOW_ATTRIBUTE(idle_states);
3243 DEFINE_SHOW_ATTRIBUTE(active_time);
3244 DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3245 DEFINE_SHOW_ATTRIBUTE(devices);
3246 DEFINE_SHOW_ATTRIBUTE(perf_state);
3247 
3248 static void genpd_debug_add(struct generic_pm_domain *genpd)
3249 {
3250 	struct dentry *d;
3251 
3252 	if (!genpd_debugfs_dir)
3253 		return;
3254 
3255 	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3256 
3257 	debugfs_create_file("current_state", 0444,
3258 			    d, genpd, &status_fops);
3259 	debugfs_create_file("sub_domains", 0444,
3260 			    d, genpd, &sub_domains_fops);
3261 	debugfs_create_file("idle_states", 0444,
3262 			    d, genpd, &idle_states_fops);
3263 	debugfs_create_file("active_time", 0444,
3264 			    d, genpd, &active_time_fops);
3265 	debugfs_create_file("total_idle_time", 0444,
3266 			    d, genpd, &total_idle_time_fops);
3267 	debugfs_create_file("devices", 0444,
3268 			    d, genpd, &devices_fops);
3269 	if (genpd->set_performance_state)
3270 		debugfs_create_file("perf_state", 0444,
3271 				    d, genpd, &perf_state_fops);
3272 }
3273 
3274 static int __init genpd_debug_init(void)
3275 {
3276 	struct generic_pm_domain *genpd;
3277 
3278 	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3279 
3280 	debugfs_create_file("pm_genpd_summary", 0444, genpd_debugfs_dir,
3281 			    NULL, &summary_fops);
3282 
3283 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3284 		genpd_debug_add(genpd);
3285 
3286 	return 0;
3287 }
3288 late_initcall(genpd_debug_init);
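
/*
 * With debugfs mounted in the usual place, the files created above can be
 * inspected from userspace, e.g.:
 *
 *	# cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
 *	# cat /sys/kernel/debug/pm_genpd/<domain>/current_state
 */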
3289 
3290 static void __exit genpd_debug_exit(void)
3291 {
3292 	debugfs_remove_recursive(genpd_debugfs_dir);
3293 }
3294 __exitcall(genpd_debug_exit);
3295 #endif /* CONFIG_DEBUG_FS */
3296