// SPDX-License-Identifier: GPL-2.0
/*
 * Per Entity Load Tracking
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Move PELT related code from fair.c into this pelt.c file
 * Author: Vincent Guittot <vincent.guittot@linaro.org>
 */

#include <linux/sched.h>
#include "sched.h"
#include "pelt.h"

int pelt_load_avg_period = PELT32_LOAD_AVG_PERIOD;
int pelt_load_avg_max = PELT32_LOAD_AVG_MAX;
const u32 *pelt_runnable_avg_yN_inv = pelt32_runnable_avg_yN_inv;

int get_pelt_halflife(void)
{
	return pelt_load_avg_period;
}
EXPORT_SYMBOL_GPL(get_pelt_halflife);

static int __set_pelt_halflife(void *data)
{
	int rc = 0;
	int num = *(int *)data;

	switch (num) {
	case PELT8_LOAD_AVG_PERIOD:
		pelt_load_avg_period = PELT8_LOAD_AVG_PERIOD;
		pelt_load_avg_max = PELT8_LOAD_AVG_MAX;
		pelt_runnable_avg_yN_inv = pelt8_runnable_avg_yN_inv;
		pr_info("PELT half life is set to %dms\n", num);
		break;
	case PELT32_LOAD_AVG_PERIOD:
		pelt_load_avg_period = PELT32_LOAD_AVG_PERIOD;
		pelt_load_avg_max = PELT32_LOAD_AVG_MAX;
		pelt_runnable_avg_yN_inv = pelt32_runnable_avg_yN_inv;
		pr_info("PELT half life is set to %dms\n", num);
		break;
	default:
		rc = -EINVAL;
		pr_err("Failed to set PELT half life to %dms, the current value is %dms\n",
		       num, pelt_load_avg_period);
	}

	return rc;
}

int set_pelt_halflife(int num)
{
	return stop_machine(__set_pelt_halflife, &num, NULL);
}
EXPORT_SYMBOL_GPL(set_pelt_halflife);

static int __init set_pelt(char *str)
{
	int rc, num;

	rc = kstrtoint(str, 0, &num);
	if (rc) {
		pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
		return 0;
	}

	__set_pelt_halflife(&num);
	return 0;
}

early_param("pelt", set_pelt);
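
/*
 * Usage sketch (assuming PELT8_LOAD_AVG_PERIOD == 8 and
 * PELT32_LOAD_AVG_PERIOD == 32): booting with "pelt=8" selects the 8ms
 * half-life and "pelt=32" the default 32ms one; any other value is
 * rejected by __set_pelt_halflife() and the current setting is kept.
 */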

/*
 * Approximate:
 *   val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
 */
static u64 decay_load(u64 val, u64 n)
{
	unsigned int local_n;

	if (unlikely(n > LOAD_AVG_PERIOD * 63))
		return 0;

	/* after bounds checking we can collapse to 32-bit */
	local_n = n;

	/*
	 * As y^PERIOD = 1/2, we can combine
	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
	 * with a look-up table which covers y^n (n < PERIOD), to achieve
	 * constant-time decay_load.
	 */
	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
		val >>= local_n / LOAD_AVG_PERIOD;
		local_n %= LOAD_AVG_PERIOD;
	}

	val = mul_u64_u32_shr(val, pelt_runnable_avg_yN_inv[local_n], 32);
	return val;
}
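
/*
 * Illustrative only, assuming the default 32ms half-life table:
 *
 *   decay_load(1024, 32) == 512	(y^32 == 1/2)
 *   decay_load(1024, 64) == 256	(y^64 == 1/4)
 *   decay_load(1024, 70): val >>= 70/32 == 2, then the remaining
 *   y^(70%32) == y^6 is applied via pelt_runnable_avg_yN_inv[6].
 */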

static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
{
	u32 c1, c2, c3 = d3; /* y^0 == 1 */

	/*
	 * c1 = d1 y^p
	 */
	c1 = decay_load((u64)d1, periods);

	/*
	 *            p-1
	 * c2 = 1024 \Sum y^n
	 *            n=1
	 *
	 *              inf        inf
	 *    = 1024 ( \Sum y^n - \Sum y^n - y^0 )
	 *              n=0        n=p
	 */
	c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;

	return c1 + c2 + c3;
}
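
/*
 * A hypothetical walk-through, assuming the default y^32 == 1/2 table:
 * with p == 2 periods, d1 == 512 and d3 == 256,
 *
 *   c1 = 512 y^2						~=  490
 *   c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, 2) - 1024	~= 1002 (1024 y)
 *   c3 = 256
 *
 * i.e. roughly 1748 in total.
 */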

/*
 * Accumulate the three separate parts of the sum; d1 the remainder
 * of the last (incomplete) period, d2 the span of full periods and d3
 * the remainder of the (incomplete) current period.
 *
 *           d1          d2           d3
 *           ^           ^            ^
 *           |           |            |
 *         |<->|<----------------->|<--->|
 * ... |---x---|------| ... |------|-----x (now)
 *
 *                           p-1
 * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
 *                           n=1
 *
 *    = u y^p +					(Step 1)
 *
 *                     p-1
 *      d1 y^p + 1024 \Sum y^n + d3 y^0		(Step 2)
 *                     n=1
 */
static __always_inline u32
accumulate_sum(u64 delta, struct sched_avg *sa,
	       unsigned long load, unsigned long runnable, int running)
{
	u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
	u64 periods;

	delta += sa->period_contrib;
	periods = delta / 1024; /* A period is 1024us (~1ms) */

	/*
	 * Step 1: decay old *_sum if we crossed period boundaries.
	 */
	if (periods) {
		sa->load_sum = decay_load(sa->load_sum, periods);
		sa->runnable_sum =
			decay_load(sa->runnable_sum, periods);
		sa->util_sum = decay_load((u64)(sa->util_sum), periods);

		/*
		 * Step 2
		 */
		delta %= 1024;
		if (load) {
			/*
			 * This relies on the:
			 *
			 *   if (!load)
			 *	runnable = running = 0;
			 *
			 * clause from ___update_load_sum(); it makes the
			 * below usage of @contrib disappear entirely,
			 * so there is no point in calculating it.
			 */
			contrib = __accumulate_pelt_segments(periods,
					1024 - sa->period_contrib, delta);
		}
	}
	sa->period_contrib = delta;

	if (load)
		sa->load_sum += load * contrib;
	if (runnable)
		sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT;
	if (running)
		sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;

	return periods;
}
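
/*
 * Illustrative only: with sa->period_contrib == 200 and delta == 2000,
 * delta + period_contrib == 2200, so periods == 2, d1 == 1024 - 200 == 824
 * and d3 == 2200 % 1024 == 152, which becomes the new period_contrib.
 */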

/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series.  To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *       p0            p1           p2
 *      (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our co-efficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ...	[re-labeling u_i --> u_{i+1}]
 */
static __always_inline int
___update_load_sum(u64 now, struct sched_avg *sa,
		   unsigned long load, unsigned long runnable, int running)
{
	u64 delta;

	delta = now - sa->last_update_time;
	/*
	 * This should only happen when time goes backwards, which it
	 * unfortunately does during sched clock init when we swap over to TSC.
	 */
	if ((s64)delta < 0) {
		sa->last_update_time = now;
		return 0;
	}

	/*
	 * Use 1024ns as the unit of measurement since it's a reasonable
	 * approximation of 1us and fast to compute.
	 */
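	/*
	 * (Illustrative: delta == 2500000ns shifts down to 2441 units;
	 *  the ~416ns truncated here stay pending because
	 *  last_update_time below only advances by delta << 10.)
	 */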
	delta >>= 10;
	if (!delta)
		return 0;

	sa->last_update_time += delta << 10;

	/*
	 * running is a subset of runnable (weight) so running can't be set if
	 * runnable is clear. But there are some corner cases where the current
	 * se has already been dequeued while cfs_rq->curr still points to it.
	 * This means that weight will be 0 but running will not, for a
	 * sched_entity but also for a cfs_rq if the latter becomes idle.
	 * As an example, this happens during idle_balance() which calls
	 * update_blocked_averages().
	 *
	 * Also see the comment in accumulate_sum().
	 */
	if (!load)
		runnable = running = 0;

	/*
	 * Now we know we crossed measurement unit boundaries. The *_avg
	 * accrues by two steps:
	 *
	 * Step 1: accumulate *_sum since last_update_time. If we haven't
	 * crossed period boundaries, finish.
	 */
	if (!accumulate_sum(delta, sa, load, runnable, running))
		return 0;

	return 1;
}

/*
 * When syncing *_avg with *_sum, we must take into account the current
 * position in the PELT segment otherwise the remaining part of the segment
 * will be considered as idle time whereas it's not yet elapsed and this will
 * generate unwanted oscillation in the range [1002..1024[.
 *
 * The max value of *_sum varies with the position in the time segment and is
 * equal to:
 *
 *   LOAD_AVG_MAX*y + sa->period_contrib
 *
 * which can be simplified into:
 *
 *   LOAD_AVG_MAX - 1024 + sa->period_contrib
 *
 * because LOAD_AVG_MAX*y == LOAD_AVG_MAX - 1024
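 *
 * (To see why: LOAD_AVG_MAX is the infinite geometric sum
 *  1024 * \Sum y^n == 1024 / (1 - y) over n = 0..inf, so multiplying
 *  it by y drops exactly the leading 1024 term.)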
 *
 * The same care must be taken when a sched entity is added, updated or
 * removed from a cfs_rq and we need to update sched_avg. Scheduler entities
 * and the cfs rq, to which they are attached, have the same position in the
 * time segment because they use the same clock. This means that we can use
 * the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
 * if it's more convenient.
 */
static __always_inline void
___update_load_avg(struct sched_avg *sa, unsigned long load)
{
	u32 divider = get_pelt_divider(sa);

	/*
	 * Step 2: update *_avg.
	 */
	sa->load_avg = div_u64(load * sa->load_sum, divider);
	sa->runnable_avg = div_u64(sa->runnable_sum, divider);
	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}
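
/*
 * Sanity check, assuming get_pelt_divider() returns
 * LOAD_AVG_MAX - 1024 + sa->period_contrib as described above: an
 * always-running entity saturates util_sum at divider << SCHED_CAPACITY_SHIFT,
 * so util_avg converges to SCHED_CAPACITY_SCALE (1024), as expected for a
 * 100% busy cpu.
 */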

/*
 * sched_entity:
 *
 *   task:
 *     se_weight()   = se->load.weight
 *     se_runnable() = !!on_rq
 *
 *   group: [ see update_cfs_group() ]
 *     se_weight()   = tg->weight * grq->load_avg / tg->load_avg
 *     se_runnable() = grq->h_nr_running
 *
 *   runnable_sum = se_runnable() * runnable = grq->runnable_sum
 *   runnable_avg = runnable_sum
 *
 *   load_sum := runnable
 *   load_avg = se_weight(se) * load_sum
 *
 * cfs_rq:
 *
 *   runnable_sum = \Sum se->avg.runnable_sum
 *   runnable_avg = \Sum se->avg.runnable_avg
 *
 *   load_sum = \Sum se_weight(se) * se->avg.load_sum
 *   load_avg = \Sum se->avg.load_avg
 */

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
		___update_load_avg(&se->avg, se_weight(se));
		trace_pelt_se_tp(se);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__update_load_avg_blocked_se);

int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se),
			       cfs_rq->curr == se)) {

		___update_load_avg(&se->avg, se_weight(se));
		cfs_se_util_change(&se->avg);
		trace_pelt_se_tp(se);
		return 1;
	}

	return 0;
}

int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
	if (___update_load_sum(now, &cfs_rq->avg,
			       scale_load_down(cfs_rq->load.weight),
			       cfs_rq->h_nr_running,
			       cfs_rq->curr != NULL)) {

		___update_load_avg(&cfs_rq->avg, 1);
		trace_pelt_cfs_tp(cfs_rq);
		return 1;
	}

	return 0;
}

/*
 * rt_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 *
 */

int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	if (___update_load_sum(now, &rq->avg_rt,
			       running,
			       running,
			       running)) {

		___update_load_avg(&rq->avg_rt, 1);
		trace_pelt_rt_tp(rq);
		return 1;
	}

	return 0;
}

/*
 * dl_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 *
 */

int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	if (___update_load_sum(now, &rq->avg_dl,
			       running,
			       running,
			       running)) {

		___update_load_avg(&rq->avg_dl, 1);
		trace_pelt_dl_tp(rq);
		return 1;
	}

	return 0;
}

#ifdef CONFIG_SCHED_THERMAL_PRESSURE
/*
 * thermal:
 *
 *   load_sum = \Sum se->avg.load_sum but se->avg.load_sum is not tracked
 *
 *   util_avg and runnable_load_avg are not supported and meaningless.
 *
 * Unlike rt/dl utilization tracking, which tracks the time spent by a cpu
 * running a rt/dl task through util_avg, the average thermal pressure is
 * tracked through load_avg. This is because the thermal pressure signal is
 * a time-weighted "delta" capacity, unlike util_avg which is binary.
 *
 *   "delta capacity" = actual capacity -
 *			capped capacity of a cpu due to a thermal event
 */

int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	if (___update_load_sum(now, &rq->avg_thermal,
			       capacity,
			       capacity,
			       capacity)) {
		___update_load_avg(&rq->avg_thermal, 1);
		trace_pelt_thermal_tp(rq);
		return 1;
	}

	return 0;
}
#endif

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/*
 * irq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 *
 */

int update_irq_load_avg(struct rq *rq, u64 running)
{
	int ret = 0;

	/*
	 * We can't use clock_pelt because irq time is not accounted in
	 * clock_task. Instead we directly scale the running time to
	 * reflect the real amount of computation.
	 */
	running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
	running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));

	/*
	 * We know the time that has been used by interrupts since the last
	 * update, but we don't know when. Let's be pessimistic and assume
	 * the interrupt happened just before the update; this is not far
	 * from reality because an interrupt will most probably wake up a
	 * task and trigger an update of the rq clock, during which the
	 * metric is updated.
	 * We start to decay with the normal context time and then add the
	 * interrupt context time.
	 * We can safely remove running from rq->clock because
	 * rq->clock += delta with delta >= running.
	 */
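	/* Step 1: pure decay over the non-irq span (nothing accrues) */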
	ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
				 0,
				 0,
				 0);
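	/* Step 2: accrue the irq span as fully loaded/runnable/running */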
	ret += ___update_load_sum(rq->clock, &rq->avg_irq,
				  1,
				  1,
				  1);

	if (ret) {
		___update_load_avg(&rq->avg_irq, 1);
		trace_pelt_irq_tp(rq);
	}

	return ret;
}
#endif

#include <trace/hooks/sched.h>
DEFINE_PER_CPU(u64, clock_task_mult);

unsigned int sysctl_sched_pelt_multiplier = 1;
__read_mostly unsigned int sched_pelt_lshift;

int sched_pelt_multiplier(struct ctl_table *table, int write, void *buffer,
			  size_t *lenp, loff_t *ppos)
{
	static DEFINE_MUTEX(mutex);
	unsigned int old;
	int ret;

	mutex_lock(&mutex);

	old = sysctl_sched_pelt_multiplier;
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret)
		goto undo;
	if (!write)
		goto done;

	trace_android_vh_sched_pelt_multiplier(old, sysctl_sched_pelt_multiplier, &ret);
	if (ret)
		goto undo;

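	/* map a multiplier of {1, 2, 4} to a left shift of {0, 1, 2} */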
	switch (sysctl_sched_pelt_multiplier) {
	case 1:
		fallthrough;
	case 2:
		fallthrough;
	case 4:
		WRITE_ONCE(sched_pelt_lshift,
			   sysctl_sched_pelt_multiplier >> 1);
		goto done;
	default:
		ret = -EINVAL;
	}

undo:
	sysctl_sched_pelt_multiplier = old;
done:
	mutex_unlock(&mutex);

	return ret;
}