// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#include <trace/hooks/sched.h>

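/*
 * Minimum IO-wait boost, in capacity units: one eighth of full capacity
 * (128 with SCHED_CAPACITY_SCALE == 1024).
 */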
#define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)

struct sugov_tunables {
	struct gov_attr_set attr_set;
	unsigned int rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy *policy;

	struct sugov_tunables *tunables;
	struct list_head tunables_hook;

	raw_spinlock_t update_lock;
	u64 last_freq_update_time;
	s64 freq_update_delay_ns;
	unsigned int next_freq;
	unsigned int cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used: */
	struct irq_work irq_work;
	struct kthread_work work;
	struct mutex work_lock;
	struct kthread_worker worker;
	struct task_struct *thread;
	bool work_in_progress;

	bool limits_changed;
	bool need_freq_update;
};

struct sugov_cpu {
	struct update_util_data update_util;
	struct sugov_policy *sg_policy;
	unsigned int cpu;

	bool iowait_boost_pending;
	unsigned int iowait_boost;
	u64 last_update;

	unsigned long util;
	unsigned long bw_min;

	/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-CPU data is fully serialized.
	 *
	 * However, drivers cannot in general deal with cross-CPU
	 * requests, so while get_next_freq() will work, our
	 * sugov_update_commit() call may not for the fast switching platforms.
	 *
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 *
	 * This is needed on the slow switching platforms too to prevent CPUs
	 * going offline from leaving stale IRQ work items behind.
	 */
	if (!cpufreq_this_cpu_can_update(sg_policy->policy))
		return false;

	if (unlikely(READ_ONCE(sg_policy->limits_changed))) {
		WRITE_ONCE(sg_policy->limits_changed, false);
		sg_policy->need_freq_update = true;

		/*
		 * The above limits_changed update must occur before the reads
		 * of policy limits in cpufreq_driver_resolve_freq() or a policy
		 * limits update might be missed, so use a memory barrier to
		 * ensure it.
		 *
		 * This pairs with the write memory barrier in sugov_limits().
		 */
		smp_mb();

		return true;
	}

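	/* Otherwise, honour the rate_limit_us tunable between frequency updates. */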
	delta_ns = time - sg_policy->last_freq_update_time;

	return delta_ns >= sg_policy->freq_update_delay_ns;
}

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
{
	bool should_update = true;

	if (sg_policy->need_freq_update) {
		sg_policy->need_freq_update = false;
		/*
		 * The policy limits have changed, but if the return value of
		 * cpufreq_driver_resolve_freq() after applying the new limits
		 * is still equal to the previously selected frequency, the
		 * driver callback need not be invoked unless the driver
		 * specifically wants that to happen on every update of the
		 * policy limits.
		 */
		if (sg_policy->next_freq == next_freq &&
		    !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS))
			return false;
	} else if (sg_policy->next_freq == next_freq) {
		return false;
	}

	trace_android_rvh_set_sugov_update(sg_policy, next_freq, &should_update);
	if (!should_update)
		return false;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	return true;
}

static void sugov_deferred_update(struct sugov_policy *sg_policy)
{
	if (!sg_policy->work_in_progress) {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}

/**
 * get_capacity_ref_freq - get the reference frequency that has been used to
 * correlate frequency and compute capacity for a given cpufreq policy. We use
 * the CPU managing it for the arch_scale_freq_ref() call in the function.
 * @policy: the cpufreq policy of the CPU in question.
 *
 * Return: the reference CPU frequency to compute a capacity.
 */
static __always_inline
unsigned long get_capacity_ref_freq(struct cpufreq_policy *policy)
{
	unsigned int freq = arch_scale_freq_ref(policy->cpu);

	if (freq)
		return freq;

	if (arch_scale_freq_invariant())
		return policy->cpuinfo.max_freq;

	/*
	 * Apply a 25% margin so that we select a higher frequency than
	 * the current one before the CPU is fully busy:
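	 * For example, with policy->cur at 1600 MHz this yields a 2000 MHz
	 * reference (1600 + 1600 / 4).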
	 */
	return policy->cur + (policy->cur >> 2);
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 *	next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 *	next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
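 *
 * For example, at (util / max) = 0.8 the raw value is 1.25 * 0.8 * max_freq,
 * i.e. exactly max_freq, so any utilization above 80% of capacity requests
 * the maximum frequency.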
 *
 * The lowest driver-supported frequency which is equal to or greater than the
 * raw next_freq (as calculated above) is returned, subject to policy min/max
 * and cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq;
	unsigned long next_freq = 0;

	freq = get_capacity_ref_freq(policy);
	trace_android_vh_map_util_freq(util, freq, max, &next_freq, policy,
				       &sg_policy->need_freq_update);
	if (next_freq)
		freq = next_freq;
	else
		freq = map_util_freq(util, freq, max);

	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
		return sg_policy->next_freq;

	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}

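/*
 * Combine a raw utilization with the performance limits: add the usual DVFS
 * headroom to @actual and clamp the result into the [@min, @max] range.
 */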
unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
				       unsigned long min,
				       unsigned long max)
{
	/* Add dvfs headroom to actual utilization */
	actual = map_util_perf(actual);
	/* Actually we don't need to target the max performance */
	if (actual < max)
		max = actual;

	/*
	 * Ensure at least minimum performance while providing more compute
	 * capacity when possible.
	 */
	return max(min, max);
}

static void sugov_get_util(struct sugov_cpu *sg_cpu, unsigned long boost)
{
	unsigned long min, max, util = scx_cpuperf_target(sg_cpu->cpu);

	if (!scx_switched_all())
		util += cpu_util_cfs_boost(sg_cpu->cpu);
	util = effective_cpu_util(sg_cpu->cpu, util, &min, &max);
	util = max(util, boost);
	sg_cpu->bw_min = min;
	sg_cpu->util = sugov_effective_cpu_perf(sg_cpu->cpu, util, min, max);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
 * efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
			       bool set_iowait_boost)
{
	s64 delta_ns = time - sg_cpu->last_update;

	/* Reset boost only if a tick has elapsed since last request */
	if (delta_ns <= TICK_NSEC)
		return false;

	sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
	sg_cpu->iowait_boost_pending = set_iowait_boost;

	return true;
}

/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
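 *
 * With SCHED_CAPACITY_SCALE at 1024, back-to-back in-tick IO wakeups ramp the
 * boost as 128 -> 256 -> 512 -> 1024.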
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
			       unsigned int flags)
{
	bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sg_cpu->iowait_boost &&
	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
		return;

	/* Boost only tasks waking up after IO */
	if (!set_iowait_boost)
		return;

	/* Ensure boost doubles only one time at each request */
	if (sg_cpu->iowait_boost_pending)
		return;
	sg_cpu->iowait_boost_pending = true;

	/* Double the boost at each request */
	if (sg_cpu->iowait_boost) {
		sg_cpu->iowait_boost =
			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
		return;
	}

	/* First wakeup after IO: start with minimum boost */
	sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}

/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 * @max_cap: the max CPU capacity
 *
 * A CPU running a task which has woken up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which appears to have been idle for at least one tick also has its
 * IO boost utilization reset.
 *
 * This mechanism is designed to boost tasks that frequently wait on IO, while
 * being more conservative on tasks which do sporadic IO operations.
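 *
 * For example, a fully ramped boost decays as 1024 -> 512 -> 256 -> 128 and
 * is then cleared once it would drop below IOWAIT_BOOST_MIN.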
 */
static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
					unsigned long max_cap)
{
	/* No boost currently required */
	if (!sg_cpu->iowait_boost)
		return 0;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sugov_iowait_reset(sg_cpu, time, false))
		return 0;

	if (!sg_cpu->iowait_boost_pending) {
		/*
		 * No boost pending; reduce the boost value.
		 */
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
			sg_cpu->iowait_boost = 0;
			return 0;
		}
	}

	sg_cpu->iowait_boost_pending = false;

	/*
	 * sg_cpu->util is already in capacity scale; convert iowait_boost
	 * into the same scale so we can compare.
	 */
	return (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;
}

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_hold_freq(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls;
	bool ret;

	/*
	 * The heuristic in this function is for the fair class. For SCX, the
	 * performance target comes directly from the BPF scheduler. Let's just
	 * follow it.
	 */
	if (scx_switched_all())
		return false;

	/* if capped by uclamp_max, always update to be in compliance */
	if (uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)))
		return false;

	/*
	 * Maintain the frequency if the CPU has not been idle recently, as
	 * reduction is likely to be premature.
	 */
	idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_hold_freq(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
{
	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_min)
		WRITE_ONCE(sg_cpu->sg_policy->limits_changed, true);
}

static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
					      u64 time, unsigned long max_cap,
					      unsigned int flags)
{
	unsigned long boost;

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu);

	if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
		return false;

	boost = sugov_iowait_apply(sg_cpu, time, max_cap);
	sugov_get_util(sg_cpu, boost);

	return true;
}

static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int cached_freq = sg_policy->cached_raw_freq;
	unsigned long max_cap;
	unsigned int next_f;

	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);

	if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
		return;

	next_f = get_next_freq(sg_policy, sg_cpu->util, max_cap);

	if (sugov_hold_freq(sg_cpu) && next_f < sg_policy->next_freq &&
	    !sg_policy->need_freq_update) {
		next_f = sg_policy->next_freq;

		/* Restore cached freq as next_freq has changed */
		sg_policy->cached_raw_freq = cached_freq;
	}

	if (!sugov_update_next_freq(sg_policy, time, next_f))
		return;

	/*
	 * This code runs under rq->lock for the target CPU, so it won't run
	 * concurrently on two different CPUs for the same target and it is not
	 * necessary to acquire the lock in the fast switch case.
	 */
	if (sg_policy->policy->fast_switch_enabled) {
		cpufreq_driver_fast_switch(sg_policy->policy, next_f);
	} else {
		raw_spin_lock(&sg_policy->update_lock);
		sugov_deferred_update(sg_policy);
		raw_spin_unlock(&sg_policy->update_lock);
	}
}

static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	unsigned long prev_util = sg_cpu->util;
	unsigned long max_cap;

	/*
	 * Fall back to the "frequency" path if frequency invariance is not
	 * supported, because the direct mapping between the utilization and
	 * the performance levels depends on the frequency invariance.
	 */
	if (!arch_scale_freq_invariant()) {
		sugov_update_single_freq(hook, time, flags);
		return;
	}

	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);

	if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
		return;

	if (sugov_hold_freq(sg_cpu) && sg_cpu->util < prev_util)
		sg_cpu->util = prev_util;

	cpufreq_driver_adjust_perf(sg_cpu->cpu, sg_cpu->bw_min,
				   sg_cpu->util, max_cap);

	sg_cpu->sg_policy->last_freq_update_time = time;
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max_cap;
	unsigned int j;

	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long boost;

		boost = sugov_iowait_apply(j_sg_cpu, time, max_cap);
		sugov_get_util(j_sg_cpu, boost);

		util = max(j_sg_cpu->util, util);
	}

	return get_next_freq(sg_policy, util, max_cap);
}

static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int next_f;

	raw_spin_lock(&sg_policy->update_lock);

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu);

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);

		if (!sugov_update_next_freq(sg_policy, time, next_f))
			goto unlock;

		if (sg_policy->policy->fast_switch_enabled)
			cpufreq_driver_fast_switch(sg_policy->policy, next_f);
		else
			sugov_deferred_update(sg_policy);
	}
unlock:
	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	unsigned int freq;
	unsigned long flags;

	/*
	 * Hold sg_policy->update_lock briefly to handle the case where
	 * sg_policy->next_freq is read here and then updated by
	 * sugov_deferred_update() just before work_in_progress is set to false
	 * here, in which case we may miss queueing the new update.
	 *
	 * Note: If a work item was queued after the update_lock is released,
	 * sugov_work() will just be called again by the kthread_work code; and
	 * the request will be processed before the sugov thread sleeps.
	 */
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	sg_policy->work_in_progress = false;
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/
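
/*
 * schedutil exposes a single tunable, rate_limit_us: the minimum time, in
 * microseconds, between two frequency updates. With per-policy tunables it is
 * typically found under a path of the form (example only):
 *   /sys/devices/system/cpu/cpufreq/policy<N>/schedutil/rate_limit_us
 */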

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attrs[] = {
	&rate_limit_us.attr,
	NULL
};
ATTRIBUTE_GROUPS(sugov);

static void sugov_tunables_free(struct kobject *kobj)
{
	struct gov_attr_set *attr_set = to_gov_attr_set(kobj);

	kfree(to_sugov_tunables(attr_set));
}

static const struct kobj_type sugov_tunables_ktype = {
	.default_groups = sugov_groups,
	.sysfs_ops = &governor_sysfs_ops,
	.release = &sugov_tunables_free,
};

/********************** cpufreq governor interface *********************/

#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
	rebuild_sched_domains_energy();
}

static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
static void sugov_eas_rebuild_sd(void)
{
	/*
	 * When called from the cpufreq_register_driver() path, the
	 * cpu_hotplug_lock is already held, so use a work item to
	 * avoid nested locking in rebuild_sched_domains().
	 */
	schedule_work(&rebuild_sd_work);
}
#else
static inline void sugov_eas_rebuild_sd(void) { };
#endif

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_attr attr = {
		.size = sizeof(struct sched_attr),
		.sched_policy = SCHED_DEADLINE,
		.sched_flags = SCHED_FLAG_SUGOV,
		.sched_nice = 0,
		.sched_priority = 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime = NSEC_PER_MSEC,
		.sched_deadline = 10 * NSEC_PER_MSEC,
		.sched_period = 10 * NSEC_PER_MSEC,
	};
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	trace_android_vh_set_sugov_sched_attr(&attr);
	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setattr_nocheck(thread, &attr);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_clear_global_tunables(void)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	sugov_eas_rebuild_sd();
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	kobject_put(&tunables->attr_set.kobj);
	policy->governor_data = NULL;
	sugov_clear_global_tunables();

stop_kthread:
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);

free_sg_policy:
	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_clear_global_tunables();

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);

	sugov_eas_rebuild_sd();
}

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
	unsigned int cpu;

	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = 0;
	sg_policy->work_in_progress = false;
	sg_policy->limits_changed = false;
	sg_policy->cached_raw_freq = 0;

	sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);

	if (policy_is_shared(policy))
		uu = sugov_update_shared;
	else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
		uu = sugov_update_single_perf;
	else
		uu = sugov_update_single_freq;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu = cpu;
		sg_cpu->sg_policy = sg_policy;
		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_rcu();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	/*
	 * The limits_changed update below must take place before the updates
	 * of policy limits in cpufreq_set_policy() or a policy limits update
	 * might be missed, so use a memory barrier to ensure it.
	 *
	 * This pairs with the memory barrier in sugov_should_update_freq().
	 */
	smp_wmb();

	WRITE_ONCE(sg_policy->limits_changed, true);
}

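/*
 * Governor registration. At run time, schedutil can be selected for a policy
 * by writing its name to the cpufreq scaling_governor attribute, e.g.
 * (path shown as an example):
 *   echo schedutil > /sys/devices/system/cpu/cpufreq/policy0/scaling_governor
 */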
struct cpufreq_governor schedutil_gov = {
	.name = "schedutil",
	.owner = THIS_MODULE,
	.flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,
	.init = sugov_init,
	.exit = sugov_exit,
	.start = sugov_start,
	.stop = sugov_stop,
	.limits = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

cpufreq_governor_init(schedutil_gov);