1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * drivers/base/power/domain_governor.c - Governors for device PM domains.
4 *
5 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6 */
7 #include <linux/kernel.h>
8 #include <linux/pm_domain.h>
9 #include <linux/pm_qos.h>
10 #include <linux/hrtimer.h>
11 #include <linux/cpuidle.h>
12 #include <linux/cpumask.h>
13 #include <linux/ktime.h>
14
15 #include <trace/hooks/pm_domain.h>
16
dev_update_qos_constraint(struct device * dev,void * data)17 static int dev_update_qos_constraint(struct device *dev, void *data)
18 {
19 s64 *constraint_ns_p = data;
20 s64 constraint_ns;
21
22 if (dev->power.subsys_data && dev->power.subsys_data->domain_data) {
23 /*
24 * Only take suspend-time QoS constraints of devices into
25 * account, because constraints updated after the device has
26 * been suspended are not guaranteed to be taken into account
27 * anyway. In order for them to take effect, the device has to
28 * be resumed and suspended again.
29 */
30 constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
31 } else {
32 /*
33 * The child is not in a domain and there's no info on its
34 * suspend/resume latencies, so assume them to be negligible and
35 * take its current PM QoS constraint (that's the only thing
36 * known at this point anyway).
37 */
38 constraint_ns = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
39 constraint_ns *= NSEC_PER_USEC;
40 }
41
42 if (constraint_ns < *constraint_ns_p)
43 *constraint_ns_p = constraint_ns;
44
45 return 0;
46 }
47
/**
 * default_suspend_ok - Default PM domain governor routine to suspend devices.
 * @dev: Device to check.
 *
 * Returns true when the device's effective resume-latency QoS constraint
 * leaves room for its own suspend and resume latencies.  The decision and
 * the effective constraint are cached in the device's gpd_timing_data and
 * reused until a constraint change invalidates them.
 */
static bool default_suspend_ok(struct device *dev)
{
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	unsigned long flags;
	s64 constraint_ns;

	dev_dbg(dev, "%s()\n", __func__);

	spin_lock_irqsave(&dev->power.lock, flags);

	/* Fast path: nothing changed since the last evaluation. */
	if (!td->constraint_changed) {
		bool ret = td->cached_suspend_ok;

		spin_unlock_irqrestore(&dev->power.lock, flags);
		return ret;
	}
	td->constraint_changed = false;
	td->cached_suspend_ok = false;
	/* Zero means "no suspend at all" until recomputed below. */
	td->effective_constraint_ns = 0;
	constraint_ns = __dev_pm_qos_resume_latency(dev);

	spin_unlock_irqrestore(&dev->power.lock, flags);

	/* A zero resume-latency constraint forbids suspending at all. */
	if (constraint_ns == 0)
		return false;

	constraint_ns *= NSEC_PER_USEC;
	/*
	 * We can walk the children without any additional locking, because
	 * they all have been suspended at this point and their
	 * effective_constraint_ns fields won't be modified in parallel with us.
	 */
	if (!dev->power.ignore_children)
		device_for_each_child(dev, &constraint_ns,
				      dev_update_qos_constraint);

	if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS) {
		/* "No restriction", so the device is allowed to suspend. */
		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
		td->cached_suspend_ok = true;
	} else if (constraint_ns == 0) {
		/*
		 * This triggers if one of the children that don't belong to a
		 * domain has a zero PM QoS constraint and it's better not to
		 * suspend then. effective_constraint_ns is zero already and
		 * cached_suspend_ok is false, so bail out.
		 */
		return false;
	} else {
		/* Budget for this device's own transition latencies. */
		constraint_ns -= td->suspend_latency_ns +
				 td->resume_latency_ns;
		/*
		 * effective_constraint_ns is zero already and cached_suspend_ok
		 * is false, so if the computed value is not positive, return
		 * right away.
		 */
		if (constraint_ns <= 0)
			return false;

		td->effective_constraint_ns = constraint_ns;
		td->cached_suspend_ok = true;
	}

	/*
	 * The children have been suspended already, so we don't need to take
	 * their suspend latencies into account here.
	 */
	return td->cached_suspend_ok;
}
121
static void update_domain_next_wakeup(struct generic_pm_domain *genpd, ktime_t now)
{
	ktime_t earliest = KTIME_MAX;
	ktime_t candidate;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	if (!(genpd->flags & GENPD_FLAG_MIN_RESIDENCY))
		return;

	/*
	 * Devices that have a predictable wakeup pattern, may specify
	 * their next wakeup. Let's find the next wakeup from all the
	 * devices attached to this domain and from all the sub-domains.
	 * It is possible that component's a next wakeup may have become
	 * stale when we read that here. We will ignore to ensure the domain
	 * is able to enter its optimal idle state.
	 */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		candidate = to_gpd_data(pdd)->next_wakeup;
		/* Ignore "none" (KTIME_MAX) and wakeups already in the past. */
		if (candidate != KTIME_MAX && !ktime_before(candidate, now) &&
		    ktime_before(candidate, earliest))
			earliest = candidate;
	}

	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		candidate = link->child->next_wakeup;
		if (candidate != KTIME_MAX && !ktime_before(candidate, now) &&
		    ktime_before(candidate, earliest))
			earliest = candidate;
	}

	genpd->next_wakeup = earliest;
}
156
next_wakeup_allows_state(struct generic_pm_domain * genpd,unsigned int state,ktime_t now)157 static bool next_wakeup_allows_state(struct generic_pm_domain *genpd,
158 unsigned int state, ktime_t now)
159 {
160 ktime_t domain_wakeup = genpd->next_wakeup;
161 s64 idle_time_ns, min_sleep_ns;
162
163 min_sleep_ns = genpd->states[state].power_off_latency_ns +
164 genpd->states[state].residency_ns;
165
166 idle_time_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
167
168 return idle_time_ns >= min_sleep_ns;
169 }
170
/**
 * __default_power_down_ok - Check whether a domain may enter a given idle state.
 * @pd: PM domain to check.
 * @state: Index of the domain idle state to validate.
 *
 * Verifies that every powered-off subdomain's off-time budget and every
 * device's effective resume-latency constraint exceeds the power-off plus
 * power-on latency of @state.  On success, genpd->max_off_time_ns is set to
 * the tightest budget found minus the power-on latency (or left untouched
 * when no constraint applies).
 */
static bool __default_power_down_ok(struct dev_pm_domain *pd,
				    unsigned int state)
{
	struct generic_pm_domain *genpd = pd_to_genpd(pd);
	struct gpd_link *link;
	struct pm_domain_data *pdd;
	s64 min_off_time_ns;
	s64 off_on_time_ns;
	bool allow = true;

	/* Android vendor hook: lets a vendor module veto this idle state. */
	trace_android_vh_allow_domain_state(genpd, state, &allow);
	if (!allow)
		return false;

	off_on_time_ns = genpd->states[state].power_off_latency_ns +
		genpd->states[state].power_on_latency_ns;

	min_off_time_ns = -1;	/* -1 == no constraint found yet */
	/*
	 * Check if subdomains can be off for enough time.
	 *
	 * All subdomains have been powered off already at this point.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *sd = link->child;
		s64 sd_max_off_ns = sd->max_off_time_ns;

		/* A negative budget means this subdomain is unconstrained. */
		if (sd_max_off_ns < 0)
			continue;

		/*
		 * Check if the subdomain is allowed to be off long enough for
		 * the current domain to turn off and on (that's how much time
		 * it will have to wait worst case).
		 */
		if (sd_max_off_ns <= off_on_time_ns)
			return false;

		if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0)
			min_off_time_ns = sd_max_off_ns;
	}

	/*
	 * Check if the devices in the domain can be off enough time.
	 */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		struct gpd_timing_data *td;
		s64 constraint_ns;

		/*
		 * Check if the device is allowed to be off long enough for the
		 * domain to turn off and on (that's how much time it will
		 * have to wait worst case).
		 */
		td = &to_gpd_data(pdd)->td;
		constraint_ns = td->effective_constraint_ns;
		/*
		 * Zero means "no suspend at all" and this runs only when all
		 * devices in the domain are suspended, so it must be positive.
		 */
		if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS)
			continue;

		if (constraint_ns <= off_on_time_ns)
			return false;

		if (min_off_time_ns > constraint_ns || min_off_time_ns < 0)
			min_off_time_ns = constraint_ns;
	}

	/*
	 * If the computed minimum device off time is negative, there are no
	 * latency constraints, so the domain can spend arbitrary time in the
	 * "off" state.
	 */
	if (min_off_time_ns < 0)
		return true;

	/*
	 * The difference between the computed minimum subdomain or device off
	 * time and the time needed to turn the domain on is the maximum
	 * theoretical time this domain can spend in the "off" state.
	 */
	genpd->max_off_time_ns = min_off_time_ns -
		genpd->states[state].power_on_latency_ns;
	return true;
}
258
/**
 * _default_power_down_ok - Default generic PM domain power off governor routine.
 * @pd: PM domain to check.
 * @now: Current timestamp, compared against the domain's next wakeup.
 *
 * This routine must be executed under the PM domain's lock.
 *
 * Selects the deepest idle state whose latency and residency requirements
 * can be met, stores it in genpd->state_idx and returns whether powering
 * down is allowed.  Results are cached and reused until
 * genpd->max_off_time_changed invalidates them.
 */
static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
{
	struct generic_pm_domain *genpd = pd_to_genpd(pd);
	int state_idx = genpd->state_count - 1;	/* start at the deepest state */
	struct gpd_link *link;

	/*
	 * Find the next wakeup from devices that can determine their own wakeup
	 * to find when the domain would wakeup and do it for every device down
	 * the hierarchy. It is not worth while to sleep if the state's residency
	 * cannot be met.
	 */
	update_domain_next_wakeup(genpd, now);
	if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (genpd->next_wakeup != KTIME_MAX)) {
		/* Let's find out the deepest domain idle state, the devices prefer */
		while (state_idx >= 0) {
			if (next_wakeup_allows_state(genpd, state_idx, now)) {
				genpd->max_off_time_changed = true;
				break;
			}
			state_idx--;
		}

		/* No state fits before the next wakeup: refuse to power down. */
		if (state_idx < 0) {
			state_idx = 0;
			genpd->cached_power_down_ok = false;
			goto done;
		}
	}

	/* Reuse the cached verdict when no constraint changed meanwhile. */
	if (!genpd->max_off_time_changed) {
		genpd->state_idx = genpd->cached_power_down_state_idx;
		return genpd->cached_power_down_ok;
	}

	/*
	 * We have to invalidate the cached results for the parents, so
	 * use the observation that default_power_down_ok() is not
	 * going to be called for any parent until this instance
	 * returns.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node)
		link->parent->max_off_time_changed = true;

	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = false;
	genpd->cached_power_down_ok = true;

	/*
	 * Find a state to power down to, starting from the state
	 * determined by the next wakeup.
	 */
	while (!__default_power_down_ok(pd, state_idx)) {
		if (state_idx == 0) {
			genpd->cached_power_down_ok = false;
			break;
		}
		state_idx--;
	}

done:
	genpd->state_idx = state_idx;
	genpd->cached_power_down_state_idx = genpd->state_idx;
	return genpd->cached_power_down_ok;
}
330
default_power_down_ok(struct dev_pm_domain * pd)331 static bool default_power_down_ok(struct dev_pm_domain *pd)
332 {
333 return _default_power_down_ok(pd, ktime_get());
334 }
335
static bool always_on_power_down_ok(struct dev_pm_domain *domain)
{
	/* Unconditionally veto powering down the domain. */
	return false;
}
340
341 #ifdef CONFIG_CPU_IDLE
/**
 * cpu_power_down_ok - Power off governor routine for PM domains of CPUs.
 * @pd: PM domain to check.
 *
 * First validates the device PM QoS constraints via _default_power_down_ok().
 * For CPU domains (GENPD_FLAG_CPU_DOMAIN), additionally requires that the
 * earliest next timer event among the domain's online CPUs leaves enough
 * idle time for a state's residency plus power-off latency, possibly
 * demoting genpd->state_idx to a shallower state.
 */
static bool cpu_power_down_ok(struct dev_pm_domain *pd)
{
	struct generic_pm_domain *genpd = pd_to_genpd(pd);
	struct cpuidle_device *dev;
	ktime_t domain_wakeup, next_hrtimer;
	ktime_t now = ktime_get();
	s64 idle_duration_ns;
	int cpu, i;

	/* Validate dev PM QoS constraints. */
	if (!_default_power_down_ok(pd, now))
		return false;

	/* Non-CPU domains need only the QoS validation above. */
	if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
		return true;

	/*
	 * Find the next wakeup for any of the online CPUs within the PM domain
	 * and its subdomains. Note, we only need the genpd->cpus, as it already
	 * contains a mask of all CPUs from subdomains.
	 */
	domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
	for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
		dev = per_cpu(cpuidle_devices, cpu);
		if (dev) {
			/* READ_ONCE: next_hrtimer may be updated concurrently. */
			next_hrtimer = READ_ONCE(dev->next_hrtimer);
			if (ktime_before(next_hrtimer, domain_wakeup))
				domain_wakeup = next_hrtimer;
		}
	}

	/* The minimum idle duration is from now - until the next wakeup. */
	idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
	if (idle_duration_ns <= 0)
		return false;

	/*
	 * Find the deepest idle state that has its residency value satisfied
	 * and by also taking into account the power off latency for the state.
	 * Start at the state picked by the dev PM QoS constraint validation.
	 */
	i = genpd->state_idx;
	do {
		if (idle_duration_ns >= (genpd->states[i].residency_ns +
		    genpd->states[i].power_off_latency_ns)) {
			genpd->state_idx = i;
			return true;
		}
	} while (--i >= 0);

	return false;
}
394
/*
 * Governor for PM domains that contain CPUs: combines the default device
 * QoS checks with per-CPU next-timer-event validation.
 */
struct dev_power_governor pm_domain_cpu_gov = {
	.suspend_ok = default_suspend_ok,
	.power_down_ok = cpu_power_down_ok,
};
399 #endif
400
/* Default governor: decisions driven purely by device PM QoS constraints. */
struct dev_power_governor simple_qos_governor = {
	.suspend_ok = default_suspend_ok,
	.power_down_ok = default_power_down_ok,
};
405
/**
 * pm_domain_always_on_gov - A governor implementing an always-on policy.
 *
 * Powering down the domain is always rejected, while individual devices
 * may still suspend subject to the default QoS-based check.
 */
struct dev_power_governor pm_domain_always_on_gov = {
	.power_down_ok = always_on_power_down_ok,
	.suspend_ok = default_suspend_ok,
};
413