• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Per Entity Load Tracking
4  *
5  *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
6  *
7  *  Interactivity improvements by Mike Galbraith
8  *  (C) 2007 Mike Galbraith <efault@gmx.de>
9  *
10  *  Various enhancements by Dmitry Adamushko.
11  *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
12  *
13  *  Group scheduling enhancements by Srivatsa Vaddagiri
14  *  Copyright IBM Corporation, 2007
15  *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
16  *
17  *  Scaled math optimizations by Thomas Gleixner
18  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
19  *
20  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
21  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
22  *
23  *  Move PELT related code from fair.c into this pelt.c file
24  *  Author: Vincent Guittot <vincent.guittot@linaro.org>
25  */
26 
27 #include <linux/sched.h>
28 #include <trace/hooks/sched.h>
29 #include "sched.h"
30 #include "pelt.h"
31 
32 /*
33  * Approximate:
34  *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
35  */
decay_load(u64 val,u64 n)36 static u64 decay_load(u64 val, u64 n)
37 {
38 	unsigned int local_n;
39 
40 	if (unlikely(n > LOAD_AVG_PERIOD * 63))
41 		return 0;
42 
43 	/* after bounds checking we can collapse to 32-bit */
44 	local_n = n;
45 
46 	/*
47 	 * As y^PERIOD = 1/2, we can combine
48 	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
49 	 * With a look-up table which covers y^n (n<PERIOD)
50 	 *
51 	 * To achieve constant time decay_load.
52 	 */
53 	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
54 		val >>= local_n / LOAD_AVG_PERIOD;
55 		local_n %= LOAD_AVG_PERIOD;
56 	}
57 
58 	val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
59 	return val;
60 }
61 
__accumulate_pelt_segments(u64 periods,u32 d1,u32 d3)62 static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
63 {
64 	u32 c1, c2, c3 = d3; /* y^0 == 1 */
65 
66 	/*
67 	 * c1 = d1 y^p
68 	 */
69 	c1 = decay_load((u64)d1, periods);
70 
71 	/*
72 	 *            p-1
73 	 * c2 = 1024 \Sum y^n
74 	 *            n=1
75 	 *
76 	 *              inf        inf
77 	 *    = 1024 ( \Sum y^n - \Sum y^n - y^0 )
78 	 *              n=0        n=p
79 	 */
80 	c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;
81 
82 	return c1 + c2 + c3;
83 }
84 
85 /*
86  * Accumulate the three separate parts of the sum; d1 the remainder
87  * of the last (incomplete) period, d2 the span of full periods and d3
88  * the remainder of the (incomplete) current period.
89  *
90  *           d1          d2           d3
91  *           ^           ^            ^
92  *           |           |            |
93  *         |<->|<----------------->|<--->|
94  * ... |---x---|------| ... |------|-----x (now)
95  *
96  *                           p-1
97  * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
98  *                           n=1
99  *
100  *    = u y^p +					(Step 1)
101  *
102  *                     p-1
103  *      d1 y^p + 1024 \Sum y^n + d3 y^0		(Step 2)
104  *                     n=1
105  */
106 static __always_inline u32
accumulate_sum(u64 delta,struct sched_avg * sa,unsigned long load,unsigned long runnable,int running)107 accumulate_sum(u64 delta, struct sched_avg *sa,
108 	       unsigned long load, unsigned long runnable, int running)
109 {
110 	u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
111 	u64 periods;
112 
113 	delta += sa->period_contrib;
114 	periods = delta / 1024; /* A period is 1024us (~1ms) */
115 
116 	/*
117 	 * Step 1: decay old *_sum if we crossed period boundaries.
118 	 */
119 	if (periods) {
120 		sa->load_sum = decay_load(sa->load_sum, periods);
121 		sa->runnable_sum =
122 			decay_load(sa->runnable_sum, periods);
123 		sa->util_sum = decay_load((u64)(sa->util_sum), periods);
124 
125 		/*
126 		 * Step 2
127 		 */
128 		delta %= 1024;
129 		if (load) {
130 			/*
131 			 * This relies on the:
132 			 *
133 			 * if (!load)
134 			 *	runnable = running = 0;
135 			 *
136 			 * clause from ___update_load_sum(); this results in
137 			 * the below usage of @contrib to disappear entirely,
138 			 * so no point in calculating it.
139 			 */
140 			contrib = __accumulate_pelt_segments(periods,
141 					1024 - sa->period_contrib, delta);
142 		}
143 	}
144 	sa->period_contrib = delta;
145 
146 	if (load)
147 		sa->load_sum += load * contrib;
148 	if (runnable)
149 		sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT;
150 	if (running)
151 		sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;
152 
153 	return periods;
154 }
155 
156 /*
157  * We can represent the historical contribution to runnable average as the
158  * coefficients of a geometric series.  To do this we sub-divide our runnable
159  * history into segments of approximately 1ms (1024us); label the segment that
160  * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
161  *
162  * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
163  *      p0            p1           p2
164  *     (now)       (~1ms ago)  (~2ms ago)
165  *
166  * Let u_i denote the fraction of p_i that the entity was runnable.
167  *
168  * We then designate the fractions u_i as our co-efficients, yielding the
169  * following representation of historical load:
170  *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
171  *
172  * We choose y based on the with of a reasonably scheduling period, fixing:
173  *   y^32 = 0.5
174  *
175  * This means that the contribution to load ~32ms ago (u_32) will be weighted
176  * approximately half as much as the contribution to load within the last ms
177  * (u_0).
178  *
179  * When a period "rolls over" and we have new u_0`, multiplying the previous
180  * sum again by y is sufficient to update:
181  *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
182  *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
183  */
___update_load_sum(u64 now,struct sched_avg * sa,unsigned long load,unsigned long runnable,int running)184 int ___update_load_sum(u64 now, struct sched_avg *sa,
185 		       unsigned long load, unsigned long runnable, int running)
186 {
187 	u64 delta;
188 
189 	delta = now - sa->last_update_time;
190 	/*
191 	 * This should only happen when time goes backwards, which it
192 	 * unfortunately does during sched clock init when we swap over to TSC.
193 	 */
194 	if ((s64)delta < 0) {
195 		sa->last_update_time = now;
196 		return 0;
197 	}
198 
199 	/*
200 	 * Use 1024ns as the unit of measurement since it's a reasonable
201 	 * approximation of 1us and fast to compute.
202 	 */
203 	delta >>= 10;
204 	if (!delta)
205 		return 0;
206 
207 	sa->last_update_time += delta << 10;
208 
209 	trace_android_rvh_update_load_sum(sa, &delta, &sched_pelt_lshift);
210 
211 	/*
212 	 * running is a subset of runnable (weight) so running can't be set if
213 	 * runnable is clear. But there are some corner cases where the current
214 	 * se has been already dequeued but cfs_rq->curr still points to it.
215 	 * This means that weight will be 0 but not running for a sched_entity
216 	 * but also for a cfs_rq if the latter becomes idle. As an example,
217 	 * this happens during idle_balance() which calls
218 	 * update_blocked_averages().
219 	 *
220 	 * Also see the comment in accumulate_sum().
221 	 */
222 	if (!load)
223 		runnable = running = 0;
224 
225 	/*
226 	 * Now we know we crossed measurement unit boundaries. The *_avg
227 	 * accrues by two steps:
228 	 *
229 	 * Step 1: accumulate *_sum since last_update_time. If we haven't
230 	 * crossed period boundaries, finish.
231 	 */
232 	if (!accumulate_sum(delta, sa, load, runnable, running))
233 		return 0;
234 
235 	return 1;
236 }
237 EXPORT_SYMBOL_GPL(___update_load_sum);
238 
239 /*
240  * When syncing *_avg with *_sum, we must take into account the current
241  * position in the PELT segment otherwise the remaining part of the segment
242  * will be considered as idle time whereas it's not yet elapsed and this will
243  * generate unwanted oscillation in the range [1002..1024[.
244  *
245  * The max value of *_sum varies with the position in the time segment and is
246  * equals to :
247  *
248  *   LOAD_AVG_MAX*y + sa->period_contrib
249  *
250  * which can be simplified into:
251  *
252  *   LOAD_AVG_MAX - 1024 + sa->period_contrib
253  *
254  * because LOAD_AVG_MAX*y == LOAD_AVG_MAX-1024
255  *
256  * The same care must be taken when a sched entity is added, updated or
257  * removed from a cfs_rq and we need to update sched_avg. Scheduler entities
258  * and the cfs rq, to which they are attached, have the same position in the
259  * time segment because they use the same clock. This means that we can use
260  * the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
261  * if it's more convenient.
262  */
___update_load_avg(struct sched_avg * sa,unsigned long load)263 void ___update_load_avg(struct sched_avg *sa, unsigned long load)
264 {
265 	u32 divider = get_pelt_divider(sa);
266 
267 	/*
268 	 * Step 2: update *_avg.
269 	 */
270 	sa->load_avg = div_u64(load * sa->load_sum, divider);
271 	sa->runnable_avg = div_u64(sa->runnable_sum, divider);
272 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
273 }
274 EXPORT_SYMBOL_GPL(___update_load_avg);
275 
276 /*
277  * sched_entity:
278  *
279  *   task:
280  *     se_weight()   = se->load.weight
281  *     se_runnable() = !!on_rq
282  *
283  *   group: [ see update_cfs_group() ]
284  *     se_weight()   = tg->weight * grq->load_avg / tg->load_avg
285  *     se_runnable() = grq->h_nr_running
286  *
287  *   runnable_sum = se_runnable() * runnable = grq->runnable_sum
288  *   runnable_avg = runnable_sum
289  *
290  *   load_sum := runnable
291  *   load_avg = se_weight(se) * load_sum
292  *
293  * cfq_rq:
294  *
295  *   runnable_sum = \Sum se->avg.runnable_sum
296  *   runnable_avg = \Sum se->avg.runnable_avg
297  *
298  *   load_sum = \Sum se_weight(se) * se->avg.load_sum
299  *   load_avg = \Sum se->avg.load_avg
300  */
301 
__update_load_avg_blocked_se(u64 now,struct sched_entity * se)302 int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
303 {
304 	if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
305 		___update_load_avg(&se->avg, se_weight(se));
306 		trace_pelt_se_tp(se);
307 		return 1;
308 	}
309 
310 	return 0;
311 }
312 EXPORT_SYMBOL_GPL(__update_load_avg_blocked_se);
313 
__update_load_avg_se(u64 now,struct cfs_rq * cfs_rq,struct sched_entity * se)314 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
315 {
316 	if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se),
317 				cfs_rq->curr == se)) {
318 
319 		___update_load_avg(&se->avg, se_weight(se));
320 		cfs_se_util_change(&se->avg);
321 		trace_pelt_se_tp(se);
322 		return 1;
323 	}
324 
325 	return 0;
326 }
327 
__update_load_avg_cfs_rq(u64 now,struct cfs_rq * cfs_rq)328 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
329 {
330 	if (___update_load_sum(now, &cfs_rq->avg,
331 				scale_load_down(cfs_rq->load.weight),
332 				cfs_rq->h_nr_running,
333 				cfs_rq->curr != NULL)) {
334 
335 		___update_load_avg(&cfs_rq->avg, 1);
336 		trace_pelt_cfs_tp(cfs_rq);
337 		return 1;
338 	}
339 
340 	return 0;
341 }
342 
343 /*
344  * rt_rq:
345  *
346  *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
347  *   util_sum = cpu_scale * load_sum
348  *   runnable_sum = util_sum
349  *
350  *   load_avg and runnable_avg are not supported and meaningless.
351  *
352  */
353 
update_rt_rq_load_avg(u64 now,struct rq * rq,int running)354 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
355 {
356 	if (___update_load_sum(now, &rq->avg_rt,
357 				running,
358 				running,
359 				running)) {
360 
361 		___update_load_avg(&rq->avg_rt, 1);
362 		trace_pelt_rt_tp(rq);
363 		return 1;
364 	}
365 
366 	return 0;
367 }
368 
369 /*
370  * dl_rq:
371  *
372  *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
373  *   util_sum = cpu_scale * load_sum
374  *   runnable_sum = util_sum
375  *
376  *   load_avg and runnable_avg are not supported and meaningless.
377  *
378  */
379 
update_dl_rq_load_avg(u64 now,struct rq * rq,int running)380 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
381 {
382 	if (___update_load_sum(now, &rq->avg_dl,
383 				running,
384 				running,
385 				running)) {
386 
387 		___update_load_avg(&rq->avg_dl, 1);
388 		trace_pelt_dl_tp(rq);
389 		return 1;
390 	}
391 
392 	return 0;
393 }
394 
395 #ifdef CONFIG_SCHED_THERMAL_PRESSURE
396 /*
397  * thermal:
398  *
399  *   load_sum = \Sum se->avg.load_sum but se->avg.load_sum is not tracked
400  *
401  *   util_avg and runnable_load_avg are not supported and meaningless.
402  *
403  * Unlike rt/dl utilization tracking that track time spent by a cpu
404  * running a rt/dl task through util_avg, the average thermal pressure is
405  * tracked through load_avg. This is because thermal pressure signal is
406  * time weighted "delta" capacity unlike util_avg which is binary.
407  * "delta capacity" =  actual capacity  -
408  *			capped capacity a cpu due to a thermal event.
409  */
410 
update_thermal_load_avg(u64 now,struct rq * rq,u64 capacity)411 int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
412 {
413 	if (___update_load_sum(now, &rq->avg_thermal,
414 			       capacity,
415 			       capacity,
416 			       capacity)) {
417 		___update_load_avg(&rq->avg_thermal, 1);
418 		trace_pelt_thermal_tp(rq);
419 		return 1;
420 	}
421 
422 	return 0;
423 }
424 #endif
425 
426 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
427 /*
428  * irq:
429  *
430  *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
431  *   util_sum = cpu_scale * load_sum
432  *   runnable_sum = util_sum
433  *
434  *   load_avg and runnable_avg are not supported and meaningless.
435  *
436  */
437 
update_irq_load_avg(struct rq * rq,u64 running)438 int update_irq_load_avg(struct rq *rq, u64 running)
439 {
440 	int ret = 0;
441 
442 	/*
443 	 * We can't use clock_pelt because irq time is not accounted in
444 	 * clock_task. Instead we directly scale the running time to
445 	 * reflect the real amount of computation
446 	 */
447 	running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
448 	running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));
449 
450 	/*
451 	 * We know the time that has been used by interrupt since last update
452 	 * but we don't when. Let be pessimistic and assume that interrupt has
453 	 * happened just before the update. This is not so far from reality
454 	 * because interrupt will most probably wake up task and trig an update
455 	 * of rq clock during which the metric is updated.
456 	 * We start to decay with normal context time and then we add the
457 	 * interrupt context time.
458 	 * We can safely remove running from rq->clock because
459 	 * rq->clock += delta with delta >= running
460 	 */
461 	ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
462 				0,
463 				0,
464 				0);
465 	ret += ___update_load_sum(rq->clock, &rq->avg_irq,
466 				1,
467 				1,
468 				1);
469 
470 	if (ret) {
471 		___update_load_avg(&rq->avg_irq, 1);
472 		trace_pelt_irq_tp(rq);
473 	}
474 
475 	return ret;
476 }
477 #endif
478 
479 unsigned int sysctl_sched_pelt_multiplier = 1;
480 __read_mostly unsigned int sched_pelt_lshift;
481 
sched_pelt_multiplier(struct ctl_table * table,int write,void * buffer,size_t * lenp,loff_t * ppos)482 int sched_pelt_multiplier(struct ctl_table *table, int write, void *buffer,
483 			  size_t *lenp, loff_t *ppos)
484 {
485 	static DEFINE_MUTEX(mutex);
486 	unsigned int old;
487 	int ret;
488 
489 	mutex_lock(&mutex);
490 
491 	old = sysctl_sched_pelt_multiplier;
492 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
493 	if (ret)
494 		goto undo;
495 	if (!write)
496 		goto done;
497 
498 	switch (sysctl_sched_pelt_multiplier)  {
499 	case 1:
500 		fallthrough;
501 	case 2:
502 		fallthrough;
503 	case 4:
504 		WRITE_ONCE(sched_pelt_lshift,
505 			   sysctl_sched_pelt_multiplier >> 1);
506 		goto done;
507 	default:
508 		ret = -EINVAL;
509 	}
510 
511 undo:
512 	sysctl_sched_pelt_multiplier = old;
513 done:
514 	mutex_unlock(&mutex);
515 
516 	return ret;
517 }
518