// SPDX-License-Identifier: GPL-2.0
/*
 * Per Entity Load Tracking
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 *  Move PELT related code from fair.c into this pelt.c file
 *  Author: Vincent Guittot <vincent.guittot@linaro.org>
 */

#include <linux/sched.h>
#include "sched.h"
#include "pelt.h"

int pelt_load_avg_period = PELT32_LOAD_AVG_PERIOD;
int pelt_load_avg_max = PELT32_LOAD_AVG_MAX;
EXPORT_SYMBOL_GPL(pelt_load_avg_max);
const u32 *pelt_runnable_avg_yN_inv = pelt32_runnable_avg_yN_inv;
static int __init set_pelt(char *str)
{
	int rc, num;

	rc = kstrtoint(str, 0, &num);
	if (rc) {
		pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
		return 0;
	}

	switch (num) {
	case PELT8_LOAD_AVG_PERIOD:
		pelt_load_avg_period = PELT8_LOAD_AVG_PERIOD;
		pelt_load_avg_max = PELT8_LOAD_AVG_MAX;
		pelt_runnable_avg_yN_inv = pelt8_runnable_avg_yN_inv;
		pr_info("PELT half life is set to %dms\n", num);
		break;
	case PELT32_LOAD_AVG_PERIOD:
		pelt_load_avg_period = PELT32_LOAD_AVG_PERIOD;
		pelt_load_avg_max = PELT32_LOAD_AVG_MAX;
		pelt_runnable_avg_yN_inv = pelt32_runnable_avg_yN_inv;
		pr_info("PELT half life is set to %dms\n", num);
		break;
	default:
		pr_err("Invalid PELT half life %dms, keeping default 32ms\n", num);
	}

	return 0;
}

early_param("pelt", set_pelt);

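/*
 * Example (illustrative): booting with "pelt=8" on the kernel command line
 * selects the 8ms half life; "pelt=32", or omitting the parameter, keeps
 * the default 32ms half life.
 */
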
/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
 */
static u64 decay_load(u64 val, u64 n)
{
	unsigned int local_n;

	if (unlikely(n > LOAD_AVG_PERIOD * 63))
		return 0;

	/* after bounds checking we can collapse to 32-bit */
	local_n = n;

	/*
	 * Since y^PERIOD = 1/2, we can combine
	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
	 * with a look-up table covering y^n (n < PERIOD) to make
	 * decay_load() constant time.
	 */
	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
		val >>= local_n / LOAD_AVG_PERIOD;
		local_n %= LOAD_AVG_PERIOD;
	}

	val = mul_u64_u32_shr(val, pelt_runnable_avg_yN_inv[local_n], 32);
	return val;
}

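/*
 * Worked example (default 32ms half life, LOAD_AVG_PERIOD == 32):
 * decay_load(val, 100) first halves val three times (100 / 32 == 3,
 * done as val >>= 3), then multiplies by y^4 (100 % 32 == 4) via the
 * inverse look-up table, i.e. by roughly 0.917.
 */
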
static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
{
	u32 c1, c2, c3 = d3; /* y^0 == 1 */

	/*
	 * c1 = d1 y^p
	 */
	c1 = decay_load((u64)d1, periods);

	/*
	 *            p-1
	 * c2 = 1024 \Sum y^n
	 *            n=1
	 *
	 *              inf        inf
	 *    = 1024 ( \Sum y^n - \Sum y^n - y^0 )
	 *              n=0        n=p
	 */
	c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;

	return c1 + c2 + c3;
}

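/*
 * Worked example: for p == 2 the middle term reduces to a single decayed
 * period. Applying LOAD_AVG_MAX*y == LOAD_AVG_MAX - 1024 twice gives
 *
 *   c2 = LOAD_AVG_MAX - LOAD_AVG_MAX y^2 - 1024 = 1024 y ~= 1002
 *
 * which matches evaluating 1024 \Sum y^n for n == 1 directly.
 */
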
/*
 * Accumulate the three separate parts of the sum; d1 the remainder
 * of the last (incomplete) period, d2 the span of full periods and d3
 * the remainder of the (incomplete) current period.
 *
 *           d1          d2           d3
 *           ^           ^            ^
 *           |           |            |
 *         |<->|<----------------->|<--->|
 * ... |---x---|------| ... |------|-----x (now)
 *
 *                           p-1
 * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
 *                           n=1
 *
 *    = u y^p +					(Step 1)
 *
 *                     p-1
 *      d1 y^p + 1024 \Sum y^n + d3 y^0		(Step 2)
 *                     n=1
 */
static __always_inline u32
accumulate_sum(u64 delta, struct sched_avg *sa,
	       unsigned long load, unsigned long runnable, int running)
{
	u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
	u64 periods;

	delta += sa->period_contrib;
	periods = delta / 1024; /* A period is 1024us (~1ms) */

	/*
	 * Step 1: decay old *_sum if we crossed period boundaries.
	 */
	if (periods) {
		sa->load_sum = decay_load(sa->load_sum, periods);
		sa->runnable_sum =
			decay_load(sa->runnable_sum, periods);
		sa->util_sum = decay_load((u64)(sa->util_sum), periods);

		/*
		 * Step 2
		 */
		delta %= 1024;
		if (load) {
			/*
			 * This relies on the:
			 *
			 * if (!load)
			 *	runnable = running = 0;
			 *
			 * clause from ___update_load_sum(); it makes the use
			 * of @contrib below disappear entirely, so there is
			 * no point in calculating it.
			 */
			contrib = __accumulate_pelt_segments(periods,
					1024 - sa->period_contrib, delta);
		}
	}
	sa->period_contrib = delta;

	if (load)
		sa->load_sum += load * contrib;
	if (runnable)
		sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT;
	if (running)
		sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;

	return periods;
}

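/*
 * Worked example: with sa->period_contrib == 200 and delta == 2441, the
 * combined span is 2641 units, so periods == 2, d1 == 824 (1024 - 200),
 * d3 == 593 (2641 % 1024), and period_contrib is left at 593 for the
 * next update.
 */
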
/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series.  To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *      p0            p1           p2
 *     (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our coefficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
 */
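/*
 * For example, if the entity was runnable for all of the period that just
 * rolled over, u_0` == 1024 (in the 1024-per-period fixed point) and the
 * new sum is 1024 + y * (previous sum): one multiply ages the entire
 * history.
 */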
int ___update_load_sum(u64 now, struct sched_avg *sa,
		       unsigned long load, unsigned long runnable, int running)
{
	u64 delta;

	delta = now - sa->last_update_time;
	/*
	 * This should only happen when time goes backwards, which it
	 * unfortunately does during sched clock init when we swap over to TSC.
	 */
	if ((s64)delta < 0) {
		sa->last_update_time = now;
		return 0;
	}

	/*
	 * Use 1024ns as the unit of measurement since it's a reasonable
	 * approximation of 1us and fast to compute.
	 */
	delta >>= 10;
	if (!delta)
		return 0;

	sa->last_update_time += delta << 10;

	/*
	 * running is a subset of runnable (weight) so running can't be set if
	 * runnable is clear. But there are some corner cases where the current
	 * se has already been dequeued while cfs_rq->curr still points to it.
	 * This means that weight will be 0 but running will not, both for a
	 * sched_entity and for a cfs_rq if the latter becomes idle. As an
	 * example, this happens during idle_balance() which calls
	 * update_blocked_averages().
	 *
	 * Also see the comment in accumulate_sum().
	 */
	if (!load)
		runnable = running = 0;

	/*
	 * Now we know we crossed measurement unit boundaries. The *_avg
	 * accrues by two steps:
	 *
	 * Step 1: accumulate *_sum since last_update_time. If we haven't
	 * crossed period boundaries, finish.
	 */
	if (!accumulate_sum(delta, sa, load, runnable, running))
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(___update_load_sum);

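/*
 * Worked example: a delta of 2,500,000ns shifts down to 2441 measurement
 * units (~2441us); last_update_time then advances by 2441 << 10 ==
 * 2,499,584ns and the 416ns remainder is carried into the next update.
 */
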
/*
 * When syncing *_avg with *_sum, we must take into account the current
 * position in the PELT segment; otherwise the remaining part of the segment
 * is treated as idle time even though it has not yet elapsed, which
 * generates unwanted oscillation in the range [1002..1024[.
 *
 * The max value of *_sum varies with the position in the time segment and
 * equals:
 *
 *   LOAD_AVG_MAX*y + sa->period_contrib
 *
 * which can be simplified into:
 *
 *   LOAD_AVG_MAX - 1024 + sa->period_contrib
 *
 * because LOAD_AVG_MAX*y == LOAD_AVG_MAX-1024
 *
 * The same care must be taken when a sched entity is added, updated or
 * removed from a cfs_rq and we need to update sched_avg. Scheduler entities
 * and the cfs rq, to which they are attached, have the same position in the
 * time segment because they use the same clock. This means that we can use
 * the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
 * if it's more convenient.
 */
void ___update_load_avg(struct sched_avg *sa, unsigned long load)
{
	u32 divider = get_pelt_divider(sa);

	/*
	 * Step 2: update *_avg.
	 */
	sa->load_avg = div_u64(load * sa->load_sum, divider);
	sa->runnable_avg = div_u64(sa->runnable_sum, divider);
	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}
EXPORT_SYMBOL_GPL(___update_load_avg);

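/*
 * Worked example (assuming the 32ms half life value LOAD_AVG_MAX == 47742):
 * with sa->period_contrib == 593 the divider is 47742 - 1024 + 593 == 47311,
 * so an always-running entity with util_sum == 47311 << SCHED_CAPACITY_SHIFT
 * yields util_avg == 1024 == SCHED_CAPACITY_SCALE.
 */
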
/*
 * sched_entity:
 *
 *   task:
 *     se_weight()   = se->load.weight
 *     se_runnable() = !!on_rq
 *
 *   group: [ see update_cfs_group() ]
 *     se_weight()   = tg->weight * grq->load_avg / tg->load_avg
 *     se_runnable() = grq->h_nr_running
 *
 *   runnable_sum = se_runnable() * runnable = grq->runnable_sum
 *   runnable_avg = runnable_sum
 *
 *   load_sum := runnable
 *   load_avg = se_weight(se) * load_sum
 *
 * cfs_rq:
 *
 *   runnable_sum = \Sum se->avg.runnable_sum
 *   runnable_avg = \Sum se->avg.runnable_avg
 *
 *   load_sum = \Sum se_weight(se) * se->avg.load_sum
 *   load_avg = \Sum se->avg.load_avg
 */

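/*
 * For example (illustrative numbers): a nice-0 task has se_weight() == 1024,
 * so with load_sum == 20000 and a divider of 47311 its load_avg comes out
 * at 1024 * 20000 / 47311 ~= 432.
 */
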
int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
		___update_load_avg(&se->avg, se_weight(se));
		trace_pelt_se_tp(se);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__update_load_avg_blocked_se);

int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se),
				cfs_rq->curr == se)) {

		___update_load_avg(&se->avg, se_weight(se));
		cfs_se_util_change(&se->avg);
		trace_pelt_se_tp(se);
		return 1;
	}

	return 0;
}

int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
	if (___update_load_sum(now, &cfs_rq->avg,
				scale_load_down(cfs_rq->load.weight),
				cfs_rq->h_nr_running,
				cfs_rq->curr != NULL)) {

		___update_load_avg(&cfs_rq->avg, 1);
		trace_pelt_cfs_tp(cfs_rq);
		return 1;
	}

	return 0;
}

/*
 * rt_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 */

int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	if (___update_load_sum(now, &rq->avg_rt,
				running,
				running,
				running)) {

		___update_load_avg(&rq->avg_rt, 1);
		trace_pelt_rt_tp(rq);
		return 1;
	}

	return 0;
}

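/*
 * For example, a CPU that has been busy with RT tasks for long enough to
 * saturate the sums ends up with util_sum == divider << SCHED_CAPACITY_SHIFT,
 * i.e. util_avg == SCHED_CAPACITY_SCALE (1024).
 */
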
/*
 * dl_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 */

int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	if (___update_load_sum(now, &rq->avg_dl,
				running,
				running,
				running)) {

		___update_load_avg(&rq->avg_dl, 1);
		trace_pelt_dl_tp(rq);
		return 1;
	}

	return 0;
}

#ifdef CONFIG_SCHED_THERMAL_PRESSURE
/*
 * thermal:
 *
 *   load_sum = \Sum se->avg.load_sum but se->avg.load_sum is not tracked
 *
 *   util_avg and runnable_load_avg are not supported and meaningless.
 *
 * Unlike rt/dl utilization tracking, which tracks the time spent by a cpu
 * running a rt/dl task through util_avg, the average thermal pressure is
 * tracked through load_avg. This is because the thermal pressure signal is
 * a time-weighted "delta" capacity, unlike util_avg which is binary.
 * "delta capacity" =  actual capacity  -
 *			capped capacity of a cpu due to a thermal event.
 */

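/*
 * For example (illustrative numbers): a CPU of capacity 1024 thermally
 * capped to 760 feeds capacity == 264 into the average below; sustained
 * capping drives load_avg towards 264.
 */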
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	if (___update_load_sum(now, &rq->avg_thermal,
			       capacity,
			       capacity,
			       capacity)) {
		___update_load_avg(&rq->avg_thermal, 1);
		trace_pelt_thermal_tp(rq);
		return 1;
	}

	return 0;
}
#endif

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/*
 * irq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 */

int update_irq_load_avg(struct rq *rq, u64 running)
{
	int ret = 0;

	/*
	 * We can't use clock_pelt because irq time is not accounted in
	 * clock_task. Instead we directly scale the running time to
	 * reflect the real amount of computation.
	 */
	running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
	running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));

	/*
	 * We know how much time has been used by interrupts since the last
	 * update, but not when it was used. Let's be pessimistic and assume
	 * the interrupt happened just before this update. This is not far
	 * from reality because an interrupt will most probably wake up a
	 * task and trigger an update of the rq clock, during which the
	 * metric is updated.
	 * We start by decaying over the normal context time and then add the
	 * interrupt context time.
	 * We can safely remove running from rq->clock because
	 * rq->clock += delta with delta >= running.
	 */
	ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
				0,
				0,
				0);
	ret += ___update_load_sum(rq->clock, &rq->avg_irq,
				1,
				1,
				1);

	if (ret) {
		___update_load_avg(&rq->avg_irq, 1);
		trace_pelt_irq_tp(rq);
	}

	return ret;
}
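
/*
 * For example, if 100us of the last 1000us of rq->clock were spent in
 * interrupt context, the first call above only decays the signal across
 * the 900us of normal-context time, and the second accrues the 100us of
 * interrupt time at full weight, as if the interrupt had just ended.
 */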
#endif

DEFINE_PER_CPU(u64, clock_task_mult);

unsigned int sysctl_sched_pelt_multiplier = 1;
__read_mostly unsigned int sched_pelt_lshift;

int sched_pelt_multiplier(struct ctl_table *table, int write, void *buffer,
			  size_t *lenp, loff_t *ppos)
{
	static DEFINE_MUTEX(mutex);
	unsigned int old;
	int ret;

	mutex_lock(&mutex);

	old = sysctl_sched_pelt_multiplier;
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret)
		goto undo;
	if (!write)
		goto done;

	switch (sysctl_sched_pelt_multiplier) {
	case 1:
	case 2:
	case 4:
		WRITE_ONCE(sched_pelt_lshift,
			   sysctl_sched_pelt_multiplier >> 1);
		goto done;
	default:
		ret = -EINVAL;
	}

undo:
	sysctl_sched_pelt_multiplier = old;
done:
	mutex_unlock(&mutex);

	return ret;
}
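
/*
 * Example usage (assuming the ctl_table entry exposes this handler as
 * /proc/sys/kernel/sched_pelt_multiplier):
 *
 *   echo 2 > /proc/sys/kernel/sched_pelt_multiplier
 *
 * selects a multiplier of 2 (sched_pelt_lshift == 1), halving the effective
 * PELT half life; only 1, 2 and 4 are accepted, anything else is rejected
 * with -EINVAL and the previous value is restored.
 */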