// SPDX-License-Identifier: GPL-2.0
/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 */
#include <linux/energy_model.h>
#include <linux/mmap_lock.h>
#include <linux/hugetlb_inline.h>
#include <linux/jiffies.h>
#include <linux/mm_api.h>
#include <linux/highmem.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/isolation.h>
#include <linux/sched/nohz.h>

#include <linux/cpuidle.h>
#include <linux/interrupt.h>
#include <linux/memory-tiers.h>
#include <linux/mempolicy.h>
#include <linux/mutex_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
#include <linux/rbtree_augmented.h>

#include <asm/switch_to.h>

#include <linux/sched/cond_resched.h>

#include "sched.h"
#include "stats.h"
#include "autogroup.h"

#include <trace/hooks/sched.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_runtime);

/*
 * Targeted preemption latency for CPU-bound tasks:
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 *
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_latency			= 6000000ULL;
EXPORT_SYMBOL_GPL(sysctl_sched_latency);
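
/*
 * Worked example (illustrative, not part of the upstream source): per the
 * formula above, an 8-CPU box with the default LOG scaling gives
 * 6 ms * (1 + ilog(8)) = 24 ms. Note that in this file only
 * sched_base_slice is actually rescaled via SET_SYSCTL() below.
 */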

/*
 * The initial- and re-scaling of tunables is configurable
 *
 * Options are:
 *
 *   SCHED_TUNABLESCALING_NONE - unscaled, always *1
 *   SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 *   SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 *
 * (default: SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 */
unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 *
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_base_slice			= 750000ULL;
EXPORT_SYMBOL_GPL(sysctl_sched_base_slice);
static unsigned int normalized_sysctl_sched_base_slice	= 750000ULL;

/*
 * After fork, the child runs first. If set to 0 (default) then
 * the parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

const_debug unsigned int sysctl_sched_migration_cost	= 500000UL;

int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
{
	int _shift = 0;

	if (kstrtoint(str, 0, &_shift))
		pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n");

	sched_thermal_decay_shift = clamp(_shift, 0, 10);
	return 1;
}
__setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);
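
/*
 * Usage example (illustrative): booting with "sched_thermal_decay_shift=3"
 * on the kernel command line sets the shift to 3; values outside [0, 10]
 * are clamped by the handler above, and an unparsable value leaves the
 * shift at 0 with a warning.
 */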

#ifdef CONFIG_SMP
/*
 * For asym packing, by default the lower numbered CPU has higher priority.
 */
int __weak arch_asym_cpu_priority(int cpu)
{
	return -cpu;
}

/*
 * The margin used when comparing utilization with CPU capacity.
 *
 * (default: ~20%)
 */
#define fits_capacity(cap, max)	((cap) * 1280 < (max) * 1024)

/*
 * The margin used when comparing CPU capacities:
 * is 'cap1' noticeably greater than 'cap2'?
 *
 * (default: ~5%)
 */
#define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078)
#endif
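
/*
 * Worked example (illustrative): with fits_capacity(), a utilization of
 * 800 fits a capacity of 1024 (800 * 1280 = 1024000 < 1024 * 1024 =
 * 1048576) while 820 does not (820 * 1280 = 1049600), i.e. roughly 20%
 * headroom is required. With capacity_greater(), 1079 is "noticeably
 * greater" than 1024 but 1078 is not, i.e. a ~5% margin.
 */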

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * (default: 5 msec, units: microseconds)
 */
static unsigned int sysctl_sched_cfs_bandwidth_slice		= 5000UL;
#endif

#ifdef CONFIG_NUMA_BALANCING
/* Restrict the NUMA promotion throughput (MB/s) for each target node. */
static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
#endif

#ifdef CONFIG_SYSCTL
static struct ctl_table sched_fair_sysctls[] = {
	{
		.procname       = "sched_child_runs_first",
		.data           = &sysctl_sched_child_runs_first,
		.maxlen         = sizeof(unsigned int),
		.mode           = 0644,
		.proc_handler   = proc_dointvec,
	},
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.procname       = "sched_cfs_bandwidth_slice_us",
		.data           = &sysctl_sched_cfs_bandwidth_slice,
		.maxlen         = sizeof(unsigned int),
		.mode           = 0644,
		.proc_handler   = proc_dointvec_minmax,
		.extra1         = SYSCTL_ONE,
	},
#endif
#ifdef CONFIG_NUMA_BALANCING
	{
		.procname	= "numa_balancing_promote_rate_limit_MBps",
		.data		= &sysctl_numa_balancing_promote_rate_limit,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
#endif /* CONFIG_NUMA_BALANCING */
	{}
};

static int __init sched_fair_sysctl_init(void)
{
	register_sysctl_init("kernel", sched_fair_sysctls);
	return 0;
}
late_initcall(sched_fair_sysctl_init);
#endif
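
/*
 * Usage example (illustrative): the table above is registered under
 * "kernel", so the knobs appear as, e.g.,
 * /proc/sys/kernel/sched_child_runs_first and
 * /proc/sys/kernel/sched_cfs_bandwidth_slice_us, writable by root via
 * sysctl(8) or a plain write to the proc file.
 */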

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static unsigned int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}
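
/*
 * Worked example (illustrative): on a 4-CPU system the LOG policy yields
 * factor = 1 + ilog2(4) = 3, so sysctl_sched_base_slice becomes
 * 0.75 ms * 3 = 2.25 ms; with 8 or more online CPUs (the clamp above) the
 * factor is 4 and the base slice is 3 ms.
 */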

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_base_slice);
#undef SET_SYSCTL
}

void __init sched_init_granularity(void)
{
	update_sysctl();
}

#define WMULT_CONST	(~0U)
#define WMULT_SHIFT	32

static void __update_inv_weight(struct load_weight *lw)
{
	unsigned long w;

	if (likely(lw->inv_weight))
		return;

	w = scale_load_down(lw->weight);

	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
		lw->inv_weight = 1;
	else if (unlikely(!w))
		lw->inv_weight = WMULT_CONST;
	else
		lw->inv_weight = WMULT_CONST / w;
}

/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
 * we're guaranteed shift stays positive because inv_weight is guaranteed to
 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
 *
 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
 * weight/lw.weight <= 1, and therefore our shift will also be positive.
 */
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
	u64 fact = scale_load_down(weight);
	u32 fact_hi = (u32)(fact >> 32);
	int shift = WMULT_SHIFT;
	int fs;

	__update_inv_weight(lw);

	if (unlikely(fact_hi)) {
		fs = fls(fact_hi);
		shift -= fs;
		fact >>= fs;
	}

	fact = mul_u32_u32(fact, lw->inv_weight);

	fact_hi = (u32)(fact >> 32);
	if (fact_hi) {
		fs = fls(fact_hi);
		shift -= fs;
		fact >>= fs;
	}

	return mul_u64_u32_shr(delta_exec, fact, shift);
}
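
/*
 * Worked example (illustrative): for a nice-0 entity (weight 1024 after
 * scale_load_down()) on a queue with lw.weight == 2048,
 * inv_weight ~= 2^32 / 2048 = 2^21, fact ~= 1024 * 2^21 = 2^31, and
 * __calc_delta(delta, ...) returns approximately (delta * 2^31) >> 32 =
 * delta / 2 -- i.e. delta_exec * weight / lw.weight.
 */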

/*
 * delta /= w
 */
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);

	return delta;
}

const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	struct rq *rq = rq_of(cfs_rq);
	int cpu = cpu_of(rq);

	if (cfs_rq->on_list)
		return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;

	cfs_rq->on_list = 1;

	/*
	 * Ensure we either appear before our parent (if already
	 * enqueued) or force our parent to appear after us when it is
	 * enqueued. The fact that we always enqueue bottom-up
	 * reduces this to two cases and a special case for the root
	 * cfs_rq. Furthermore, it also means that we will always reset
	 * tmp_alone_branch either when the branch is connected
	 * to a tree or when we reach the top of the tree.
	 */
	if (cfs_rq->tg->parent &&
	    cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
		/*
		 * If the parent is already on the list, we add the child
		 * just before. Thanks to the circular linked property of
		 * the list, this means putting the child at the tail
		 * of the list that starts at the parent.
		 */
		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
			&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
		/*
		 * The branch is now connected to its tree so we can
		 * reset tmp_alone_branch to the beginning of the
		 * list.
		 */
		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
		return true;
	}

	if (!cfs_rq->tg->parent) {
		/*
		 * A cfs_rq without a parent should be put
		 * at the tail of the list.
		 */
		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
			&rq->leaf_cfs_rq_list);
		/*
		 * We have reached the top of a tree so we can reset
		 * tmp_alone_branch to the beginning of the list.
		 */
		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
		return true;
	}

	/*
	 * The parent has not been added yet, so we want to
	 * make sure that it will be put after us.
	 * tmp_alone_branch points to the beginning of the branch
	 * where we will add the parent.
	 */
	list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
	/*
	 * Update tmp_alone_branch to point to the new beginning
	 * of the branch.
	 */
	rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
	return false;
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		struct rq *rq = rq_of(cfs_rq);

		/*
		 * With the cfs_rq being unthrottled/throttled during an
		 * enqueue, it can happen that tmp_alone_branch points to
		 * the leaf we finally want to delete. In this case,
		 * tmp_alone_branch moves to the previous element, but it
		 * will point to rq->leaf_cfs_rq_list at the end of the
		 * enqueue.
		 */
		if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
			rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;

		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
	SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
				 leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline struct cfs_rq *
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return se->cfs_rq;

	return NULL;
}

static inline struct sched_entity *parent_entity(const struct sched_entity *se)
{
	return se->parent;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * The preemption test can only be made between sibling entities
	 * that are in the same cfs_rq, i.e. that have a common parent.
	 * Walk up the hierarchy of both tasks until we find ancestors
	 * that are siblings of a common parent.
	 */

	/* First walk up until both entities are at the same depth */
	se_depth = (*se)->depth;
	pse_depth = (*pse)->depth;

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

static int tg_is_idle(struct task_group *tg)
{
	return tg->idle > 0;
}

static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
{
	return cfs_rq->idle > 0;
}

static int se_is_idle(struct sched_entity *se)
{
	if (entity_is_task(se))
		return task_has_idle_policy(task_of(se));
	return cfs_rq_is_idle(group_cfs_rq(se));
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	return true;
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
}

#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)	\
		for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

static inline int tg_is_idle(struct task_group *tg)
{
	return 0;
}

static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
{
	return 0;
}

static int se_is_idle(struct sched_entity *se)
{
	return 0;
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - max_vruntime);
	if (delta > 0)
		max_vruntime = vruntime;

	return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline bool entity_before(const struct sched_entity *a,
				 const struct sched_entity *b)
{
	/*
	 * Tiebreak on vruntime seems unnecessary since it can
	 * hardly happen.
	 */
	return (s64)(a->deadline - b->deadline) < 0;
}

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return (s64)(se->vruntime - cfs_rq->min_vruntime);
}

#define __node_2_se(node) \
	rb_entry((node), struct sched_entity, run_node)

/*
 * Compute virtual time from the per-task service numbers:
 *
 * Fair schedulers conserve lag:
 *
 *   \Sum lag_i = 0
 *
 * Where lag_i is given by:
 *
 *   lag_i = S - s_i = w_i * (V - v_i)
 *
 * Where S is the ideal service time and V is its virtual time counterpart.
 * Therefore:
 *
 *   \Sum lag_i = 0
 *   \Sum w_i * (V - v_i) = 0
 *   \Sum w_i * V - w_i * v_i = 0
 *
 * From which we can solve an expression for V in v_i (which we have in
 * se->vruntime):
 *
 *       \Sum v_i * w_i   \Sum v_i * w_i
 *   V = -------------- = --------------
 *          \Sum w_i            W
 *
 * Specifically, this is the weighted average of all entity virtual runtimes.
 *
 * [[ NOTE: this is only equal to the ideal scheduler under the condition
 *          that join/leave operations happen at lag_i = 0, otherwise the
 *          virtual time has non-contiguous motion equivalent to:
 *
 *	      V +-= lag_i / W
 *
 *	    Also see the comment in place_entity() that deals with this. ]]
 *
 * However, since v_i is u64, and the multiplication could easily overflow,
 * transform it into a relative form that uses smaller quantities:
 *
 * Substitute: v_i == (v_i - v0) + v0
 *
 *     \Sum ((v_i - v0) + v0) * w_i   \Sum (v_i - v0) * w_i
 * V = ---------------------------- = --------------------- + v0
 *                  W                            W
 *
 * Which we track using:
 *
 *                    v0 := cfs_rq->min_vruntime
 * \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime
 *              \Sum w_i := cfs_rq->avg_load
 *
 * Since min_vruntime is a monotonically increasing variable that closely
 * tracks the per-task service, these deltas (v_i - v0) will be in the
 * order of the maximal (virtual) lag induced in the system due to
 * quantisation.
 *
 * Also, we use scale_load_down() to reduce the size.
 *
 * As measured, the max (key * weight) value was ~44 bits for a kernel build.
 */
static void
avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	unsigned long weight = scale_load_down(se->load.weight);
	s64 key = entity_key(cfs_rq, se);

	cfs_rq->avg_vruntime += key * weight;
	cfs_rq->avg_load += weight;
}

static void
avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	unsigned long weight = scale_load_down(se->load.weight);
	s64 key = entity_key(cfs_rq, se);

	cfs_rq->avg_vruntime -= key * weight;
	cfs_rq->avg_load -= weight;
}

static inline
void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
{
	/*
	 * v' = v + d ==> avg_vruntime' = avg_vruntime - d*avg_load
	 */
	cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta;
}

/*
 * Specifically: avg_vruntime() + 0 must result in entity_eligible() := true.
 * For this to be so, the result of this function must have a left bias.
 */
u64 avg_vruntime(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	s64 avg = cfs_rq->avg_vruntime;
	long load = cfs_rq->avg_load;

	if (curr && curr->on_rq) {
		unsigned long weight = scale_load_down(curr->load.weight);

		avg += entity_key(cfs_rq, curr) * weight;
		load += weight;
	}

	if (load) {
		/* sign flips effective floor / ceil */
		if (avg < 0)
			avg -= (load - 1);
		avg = div_s64(avg, load);
	}

	return cfs_rq->min_vruntime + avg;
}
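
/*
 * Worked example (illustrative): with v0 = min_vruntime and two queued
 * entities A (weight 1024, v_A - v0 = 1 ms) and B (weight 2048,
 * v_B - v0 = 4 ms), avg_vruntime = 1024*1ms + 2048*4ms = 9216 weight-ms
 * and avg_load = 3072, so V = v0 + 9216/3072 = v0 + 3 ms: the
 * load-weighted average, pulled towards the heavier entity.
 */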

/*
 * lag_i = S - s_i = w_i * (V - v_i)
 *
 * However, since V is approximated by the weighted average of all entities,
 * it is possible -- by addition/removal/reweight to the tree -- to move V
 * around and end up with a larger lag than we started with.
 *
 * Limit this to either double the slice length or TICK_NSEC, whichever is
 * larger, since the latter is the timing granularity.
 *
 * EEVDF gives the following limit for a steady state system:
 *
 *   -r_max < lag < max(r_max, q)
 *
 * XXX could add max_slice to the augmented data to track this.
 */
static s64 entity_lag(u64 avruntime, struct sched_entity *se)
{
	s64 vlag, limit;

	vlag = avruntime - se->vruntime;
	limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);

	return clamp(vlag, -limit, limit);
}
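
/*
 * Worked example (illustrative): for a nice-0 entity with se->slice = 3 ms,
 * calc_delta_fair() is the identity, so, assuming TICK_NSEC is smaller
 * (e.g. HZ=250, 4 ms), limit = max(6 ms, 4 ms) = 6 ms and any computed
 * vlag is clamped into [-6 ms, 6 ms].
 */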

static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	SCHED_WARN_ON(!se->on_rq);

	se->vlag = entity_lag(avg_vruntime(cfs_rq), se);
}

/*
 * An entity is eligible once it has received less service than it ought
 * to have, i.e. lag >= 0.
 *
 * lag_i = S - s_i = w_i*(V - v_i)
 *
 * lag_i >= 0 -> V >= v_i
 *
 *     \Sum (v_i - v)*w_i
 * V = ------------------ + v
 *          \Sum w_i
 *
 * lag_i >= 0 -> \Sum (v_i - v)*w_i >= (v_i - v)*(\Sum w_i)
 *
 * Note: using 'avg_vruntime() > se->vruntime' is inaccurate due
 *       to the loss in precision caused by the division.
 */
static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)
{
	struct sched_entity *curr = cfs_rq->curr;
	s64 avg = cfs_rq->avg_vruntime;
	long load = cfs_rq->avg_load;

	if (curr && curr->on_rq) {
		unsigned long weight = scale_load_down(curr->load.weight);

		avg += entity_key(cfs_rq, curr) * weight;
		load += weight;
	}

	return avg >= (s64)(vruntime - cfs_rq->min_vruntime) * load;
}

int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return vruntime_eligible(cfs_rq, se->vruntime);
}

static u64 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime)
{
	u64 min_vruntime = cfs_rq->min_vruntime;
	/*
	 * open coded max_vruntime() to allow updating avg_vruntime
	 */
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0) {
		avg_vruntime_update(cfs_rq, delta);
		min_vruntime = vruntime;
	}
	return min_vruntime;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = __pick_root_entity(cfs_rq);
	struct sched_entity *curr = cfs_rq->curr;
	u64 vruntime = cfs_rq->min_vruntime;

	if (curr) {
		if (curr->on_rq)
			vruntime = curr->vruntime;
		else
			curr = NULL;
	}

	if (se) {
		if (!curr)
			vruntime = se->min_vruntime;
		else
			vruntime = min_vruntime(vruntime, se->min_vruntime);
	}

	/* ensure we never gain time by being placed backwards. */
	u64_u32_store(cfs_rq->min_vruntime,
		      __update_min_vruntime(cfs_rq, vruntime));
}

static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
{
	return entity_before(__node_2_se(a), __node_2_se(b));
}

#define vruntime_gt(field, lse, rse) ({ (s64)((lse)->field - (rse)->field) > 0; })

static inline void __min_vruntime_update(struct sched_entity *se, struct rb_node *node)
{
	if (node) {
		struct sched_entity *rse = __node_2_se(node);
		if (vruntime_gt(min_vruntime, se, rse))
			se->min_vruntime = rse->min_vruntime;
	}
}

/*
 * se->min_vruntime = min(se->vruntime, {left,right}->min_vruntime)
 */
static inline bool min_vruntime_update(struct sched_entity *se, bool exit)
{
	u64 old_min_vruntime = se->min_vruntime;
	struct rb_node *node = &se->run_node;

	se->min_vruntime = se->vruntime;
	__min_vruntime_update(se, node->rb_right);
	__min_vruntime_update(se, node->rb_left);

	return se->min_vruntime == old_min_vruntime;
}

RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
		     run_node, min_vruntime, min_vruntime_update);

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	trace_android_rvh_enqueue_entity(cfs_rq, se);
	avg_vruntime_add(cfs_rq, se);
	se->min_vruntime = se->vruntime;
	rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
				__entity_less, &min_vruntime_cb);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	trace_android_rvh_dequeue_entity(cfs_rq, se);
	rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
				  &min_vruntime_cb);
	avg_vruntime_sub(cfs_rq, se);
}

struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *root = cfs_rq->tasks_timeline.rb_root.rb_node;

	if (!root)
		return NULL;

	return __node_2_se(root);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);

	if (!left)
		return NULL;

	return __node_2_se(left);
}

/*
 * Earliest Eligible Virtual Deadline First
 *
 * In order to provide latency guarantees for different request sizes
 * EEVDF selects the best runnable task from two criteria:
 *
 *  1) the task must be eligible (must be owed service)
 *
 *  2) from those tasks that meet 1), we select the one
 *     with the earliest virtual deadline.
 *
 * We can do this in O(log n) time due to an augmented RB-tree. The
 * tree keeps the entries sorted on deadline, but also functions as a
 * heap based on the vruntime by keeping:
 *
 *  se->min_vruntime = min(se->vruntime, se->{left,right}->min_vruntime)
 *
 * Which allows tree pruning through eligibility.
 */
static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
{
	struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node;
	struct sched_entity *se = __pick_first_entity(cfs_rq);
	struct sched_entity *curr = cfs_rq->curr;
	struct sched_entity *best = NULL;

	/*
	 * We can safely skip the eligibility check if there is only one
	 * entity in this cfs_rq, saving some cycles.
	 */
	if (cfs_rq->nr_running == 1)
		return curr && curr->on_rq ? curr : se;

	if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
		curr = NULL;

	/*
	 * Once selected, run a task until it either becomes non-eligible or
	 * until it gets a new slice. See the HACK in set_next_entity().
	 */
	if (sched_feat(RUN_TO_PARITY) && curr && curr->vlag == curr->deadline)
		return curr;

	/* Pick the leftmost entity if it's eligible */
	if (se && entity_eligible(cfs_rq, se)) {
		best = se;
		goto found;
	}

	/* Heap search for the EEVD entity */
	while (node) {
		struct rb_node *left = node->rb_left;

		/*
		 * Eligible entities in the left subtree are always better
		 * choices, since they have earlier deadlines.
		 */
		if (left && vruntime_eligible(cfs_rq,
					__node_2_se(left)->min_vruntime)) {
			node = left;
			continue;
		}

		se = __node_2_se(node);

		/*
		 * The left subtree either is empty or has no eligible
		 * entity, so check the current node since it is the one
		 * with the earliest deadline that might be eligible.
		 */
		if (entity_eligible(cfs_rq, se)) {
			best = se;
			break;
		}

		node = node->rb_right;
	}
found:
	if (!best || (curr && entity_before(curr, best)))
		best = curr;

	return best;
}
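
/*
 * Illustrative walk (not part of the kernel source): suppose three enqueued
 * entities A(deadline=10, vruntime=5), B(deadline=20, vruntime=1) and
 * C(deadline=30, vruntime=9), with only B eligible. The leftmost entity A
 * fails the eligibility check, the left subtree's min_vruntime shows it
 * holds no eligible entity, and the heap search settles on B: the eligible
 * entity with the earliest virtual deadline.
 */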

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);

	if (!last)
		return NULL;

	return __node_2_se(last);
}

/**************************************************************
 * Scheduling class statistics methods:
 */
#ifdef CONFIG_SMP
int sched_update_scaling(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_base_slice);
#undef WRT_SYSCTL

	return 0;
}
#endif
#endif

static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);

/*
 * XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i
 * this is probably good enough.
 */
static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	bool skip_preempt = false;

	trace_android_rvh_update_deadline(cfs_rq, se, &skip_preempt);
	if (skip_preempt)
		return;

	if ((s64)(se->vruntime - se->deadline) < 0)
		return;

	/*
	 * For EEVDF the virtual time slope is determined by w_i (iow.
	 * nice) while the request time r_i is determined by
	 * sysctl_sched_base_slice.
	 */
	se->slice = sysctl_sched_base_slice;

	/*
	 * EEVDF: vd_i = ve_i + r_i / w_i
	 */
	se->deadline = se->vruntime + calc_delta_fair(se->slice, se);

	/*
	 * The task has consumed its request, reschedule.
	 */
	if (cfs_rq->nr_running > 1) {
		resched_curr(rq_of(cfs_rq));
		clear_buddies(cfs_rq, se);
	}
}
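
/*
 * Worked example (illustrative): for a nice-0 entity calc_delta_fair() is
 * the identity, so with a scaled base slice of 3 ms (0.75 ms * factor 4)
 * the new virtual deadline is vruntime + 3 ms. An entity with half of
 * NICE_0_LOAD has its slice scaled by NICE_0_LOAD/weight = 2, so its
 * deadline lands at vruntime + 6 ms of virtual time.
 */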

#include "pelt.h"
#ifdef CONFIG_SMP

static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
static unsigned long task_h_load(struct task_struct *p);
static unsigned long capacity_of(int cpu);

/* Give a new sched_entity starting runnable values so its load is seen as heavy in its infancy */
void init_entity_runnable_average(struct sched_entity *se)
{
	struct sched_avg *sa = &se->avg;

	memset(sa, 0, sizeof(*sa));

	/*
	 * Tasks are initialized with full load to be seen as heavy tasks until
	 * they get a chance to stabilize to their real load level.
	 * Group entities are initialized with zero load to reflect the fact
	 * that nothing has been attached to the task group yet.
	 */
	if (entity_is_task(se))
		sa->load_avg = scale_load_down(se->load.weight);

	/* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
}

/*
 * With new tasks being created, their initial util_avgs are extrapolated
 * based on the cfs_rq's current util_avg:
 *
 *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
 *
 * However, in many cases, the above util_avg does not give a desired
 * value. Moreover, the sum of the util_avgs may be divergent, such
 * as when the series is a harmonic series.
 *
 * To solve this problem, we also cap the util_avg of successive tasks to
 * only 1/2 of the left utilization budget:
 *
 *   util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
 *
 * where n denotes the nth task and cpu_scale the CPU capacity.
 *
 * For example, for a CPU with 1024 of capacity, a simplest series from
 * the beginning would be like:
 *
 *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
 *
 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
 * if util_avg > util_avg_cap.
 */
void post_init_entity_util_avg(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	struct sched_avg *sa = &se->avg;
	long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
	long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;

	if (p->sched_class != &fair_sched_class) {
		/*
		 * For !fair tasks do:
		 *
		update_cfs_rq_load_avg(now, cfs_rq);
		attach_entity_load_avg(cfs_rq, se);
		switched_from_fair(rq, p);
		 *
		 * such that the next switched_to_fair() has the
		 * expected state.
		 */
		se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
		return;
	}

	if (cap > 0) {
		if (cfs_rq->avg.util_avg != 0) {
			sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
			sa->util_avg /= (cfs_rq->avg.load_avg + 1);

			if (sa->util_avg > cap)
				sa->util_avg = cap;
		} else {
			sa->util_avg = cap;
		}
	}

	sa->runnable_avg = sa->util_avg;

	/* Hook before this se's util is attached to cfs_rq's util */
	trace_android_rvh_post_init_entity_util_avg(se);
}

#else /* !CONFIG_SMP */
void init_entity_runnable_average(struct sched_entity *se)
{
}
void post_init_entity_util_avg(struct task_struct *p)
{
}
static void update_tg_load_avg(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics.
 */
static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_clock_task(rq_of(cfs_rq));
	u64 delta_exec;

	if (unlikely(!curr))
		return;

	delta_exec = now - curr->exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	curr->exec_start = now;

	if (schedstat_enabled()) {
		struct sched_statistics *stats;

		stats = __schedstats_from_se(curr);
		__schedstat_set(stats->exec_max,
				max(delta_exec, stats->exec_max));
	}

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq->exec_clock, delta_exec);

	curr->vruntime += calc_delta_fair(delta_exec, curr);
	update_deadline(cfs_rq, curr);
	update_min_vruntime(cfs_rq);

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cgroup_account_cputime(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static void update_curr_fair(struct rq *rq)
{
	update_curr(cfs_rq_of(&rq->curr->se));
}

static inline void
update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct sched_statistics *stats;
	struct task_struct *p = NULL;

	if (!schedstat_enabled())
		return;

	stats = __schedstats_from_se(se);

	if (entity_is_task(se))
		p = task_of(se);

	__update_stats_wait_start(rq_of(cfs_rq), p, stats);
}

static inline void
update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct sched_statistics *stats;
	struct task_struct *p = NULL;

	if (!schedstat_enabled())
		return;

	stats = __schedstats_from_se(se);

	/*
	 * When sched_schedstat changes from 0 to 1, some sched entities
	 * may already be in the runqueue with se->statistics.wait_start
	 * still 0, which would make the computed delta wrong. We need to
	 * avoid this scenario.
	 */
	if (unlikely(!schedstat_val(stats->wait_start)))
		return;

	if (entity_is_task(se))
		p = task_of(se);

	__update_stats_wait_end(rq_of(cfs_rq), p, stats);
}

static inline void
update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct sched_statistics *stats;
	struct task_struct *tsk = NULL;

	if (!schedstat_enabled())
		return;

	stats = __schedstats_from_se(se);

	if (entity_is_task(se))
		tsk = task_of(se);

	__update_stats_enqueue_sleeper(rq_of(cfs_rq), tsk, stats);
}

/*
 * Task is being enqueued - update stats:
 */
static inline void
update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	if (!schedstat_enabled())
		return;

	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start_fair(cfs_rq, se);

	if (flags & ENQUEUE_WAKEUP)
		update_stats_enqueue_sleeper_fair(cfs_rq, se);
}

static inline void
update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{

	if (!schedstat_enabled())
		return;

	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end_fair(cfs_rq, se);

	if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
		struct task_struct *tsk = task_of(se);
		unsigned int state;

		/* XXX racy against TTWU */
		state = READ_ONCE(tsk->__state);
		if (state & TASK_INTERRUPTIBLE)
			__schedstat_set(tsk->stats.sleep_start,
				      rq_clock(rq_of(cfs_rq)));
		if (state & TASK_UNINTERRUPTIBLE)
			__schedstat_set(tsk->stats.block_start,
				      rq_clock(rq_of(cfs_rq)));
	}
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

static inline bool is_core_idle(int cpu)
{
#ifdef CONFIG_SCHED_SMT
	int sibling;

	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
		if (cpu == sibling)
			continue;

		if (!idle_cpu(sibling))
			return false;
	}
#endif

	return true;
}

#ifdef CONFIG_NUMA
#define NUMA_IMBALANCE_MIN 2

static inline long
adjust_numa_imbalance(int imbalance, int dst_running, int imb_numa_nr)
{
	/*
	 * Allow a NUMA imbalance if the number of busy CPUs is below the
	 * maximum threshold. Above this threshold, individual tasks may
	 * be contending for both memory bandwidth and any shared HT
	 * resources. This is an approximation as the number of running
	 * tasks may not be related to the number of busy CPUs due to
	 * sched_setaffinity.
	 */
	if (dst_running > imb_numa_nr)
		return imbalance;

	/*
	 * Allow a small imbalance based on a simple pair of communicating
	 * tasks that remain local when the destination is lightly loaded.
	 */
	if (imbalance <= NUMA_IMBALANCE_MIN)
		return 0;

	return imbalance;
}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the task's virtual memory size and
 * numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

/* A page with hint page fault latency < threshold (in ms) is considered hot */
unsigned int sysctl_numa_balancing_hot_threshold = MSEC_PER_SEC;
struct numa_group {
	refcount_t refcount;

	spinlock_t lock; /* nr_tasks, tasks */
	int nr_tasks;
	pid_t gid;
	int active_nodes;

	struct rcu_head rcu;
	unsigned long total_faults;
	unsigned long max_faults_cpu;
	/*
	 * faults[] array is split into two regions: faults_mem and faults_cpu.
	 *
	 * Faults_cpu is used to decide whether memory should move
	 * towards the CPU. As a consequence, these stats are weighted
	 * more by CPU use than by memory faults.
	 */
	unsigned long faults[];
};

/*
 * For functions that can be called in multiple contexts that permit reading
 * ->numa_group (see struct task_struct for locking rules).
 */
static struct numa_group *deref_task_numa_group(struct task_struct *p)
{
	return rcu_dereference_check(p->numa_group, p == current ||
		(lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));
}

static struct numa_group *deref_curr_numa_group(struct task_struct *p)
{
	return rcu_dereference_protected(p->numa_group, p == current);
}

static inline unsigned long group_faults_priv(struct numa_group *ng);
static inline unsigned long group_faults_shared(struct numa_group *ng);

static unsigned int task_nr_scan_windows(struct task_struct *p)
{
	unsigned long rss = 0;
	unsigned long nr_scan_pages;

	/*
	 * Calculations based on RSS as non-present and empty pages are skipped
	 * by the PTE scanner and NUMA hinting faults should be trapped based
	 * on resident pages.
	 */
	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
	rss = get_mm_rss(p->mm);
	if (!rss)
		rss = nr_scan_pages;

	rss = round_up(rss, nr_scan_pages);
	return rss / nr_scan_pages;
}
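
/*
 * Worked example (illustrative): with the default 256 MB scan size and
 * 4 KiB pages, nr_scan_pages = 256 << (20 - 12) = 65536 pages. A task
 * with a 1 GiB RSS (262144 pages) therefore needs
 * 262144 / 65536 = 4 scan windows to cover its address space.
 */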

/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560

static unsigned int task_scan_min(struct task_struct *p)
{
	unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
	unsigned int scan, floor;
	unsigned int windows = 1;

	if (scan_size < MAX_SCAN_WINDOW)
		windows = MAX_SCAN_WINDOW / scan_size;
	floor = 1000 / windows;

	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
	return max_t(unsigned int, floor, scan);
}
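
/*
 * Worked example (illustrative): with scan_size = 256 MB,
 * windows = 2560 / 256 = 10 and floor = 1000 / 10 = 100 ms. For the 1 GiB
 * task above (4 scan windows), scan = 1000 / 4 = 250 ms, so the minimum
 * scan period is max(100, 250) = 250 ms.
 */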

static unsigned int task_scan_start(struct task_struct *p)
{
	unsigned long smin = task_scan_min(p);
	unsigned long period = smin;
	struct numa_group *ng;

	/* Scale the maximum scan period with the amount of shared memory. */
	rcu_read_lock();
	ng = rcu_dereference(p->numa_group);
	if (ng) {
		unsigned long shared = group_faults_shared(ng);
		unsigned long private = group_faults_priv(ng);

		period *= refcount_read(&ng->refcount);
		period *= shared + 1;
		period /= private + shared + 1;
	}
	rcu_read_unlock();

	return max(smin, period);
}

static unsigned int task_scan_max(struct task_struct *p)
{
	unsigned long smin = task_scan_min(p);
	unsigned long smax;
	struct numa_group *ng;

	/* Watch for min being lower than max due to floor calculations */
	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);

	/* Scale the maximum scan period with the amount of shared memory. */
	ng = deref_curr_numa_group(p);
	if (ng) {
		unsigned long shared = group_faults_shared(ng);
		unsigned long private = group_faults_priv(ng);
		unsigned long period = smax;

		period *= refcount_read(&ng->refcount);
		period *= shared + 1;
		period /= private + shared + 1;

		smax = max(smax, period);
	}

	return max(smin, smax);
}

static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}

static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}

/* Shared or private faults. */
#define NR_NUMA_HINT_FAULT_TYPES 2

/* Memory and CPU locality */
#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)

/* Averaged statistics, and temporary buffers. */
#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)

pid_t task_numa_group_id(struct task_struct *p)
{
	struct numa_group *ng;
	pid_t gid = 0;

	rcu_read_lock();
	ng = rcu_dereference(p->numa_group);
	if (ng)
		gid = ng->gid;
	rcu_read_unlock();

	return gid;
}

/*
 * The averaged statistics, shared & private, memory & CPU,
 * occupy the first half of the array. The second half of the
 * array is for current counters, which are averaged into the
 * first set by task_numa_placement.
 */
static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}

static inline unsigned long task_faults(struct task_struct *p, int nid)
{
	if (!p->numa_faults)
		return 0;

	return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults(struct task_struct *p, int nid)
{
	struct numa_group *ng = deref_task_numa_group(p);

	if (!ng)
		return 0;

	return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
{
	return group->faults[task_faults_idx(NUMA_CPU, nid, 0)] +
		group->faults[task_faults_idx(NUMA_CPU, nid, 1)];
}

static inline unsigned long group_faults_priv(struct numa_group *ng)
{
	unsigned long faults = 0;
	int node;

	for_each_online_node(node) {
		faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
	}

	return faults;
}

static inline unsigned long group_faults_shared(struct numa_group *ng)
{
	unsigned long faults = 0;
	int node;

	for_each_online_node(node) {
		faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
	}

	return faults;
}

/*
 * A node triggering more than 1/3 as many NUMA faults as the maximum is
 * considered part of a numa group's pseudo-interleaving set. Migrations
 * between these nodes are slowed down, to allow things to settle down.
 */
#define ACTIVE_NODE_FRACTION 3

static bool numa_is_active_node(int nid, struct numa_group *ng)
{
	return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
}

/* Handle placement on systems where not all nodes are directly connected. */
static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
					int lim_dist, bool task)
{
	unsigned long score = 0;
	int node, max_dist;

	/*
	 * All nodes are directly connected, and the same distance
	 * from each other. No need for fancy placement algorithms.
	 */
	if (sched_numa_topology_type == NUMA_DIRECT)
		return 0;

	/* sched_max_numa_distance may be changed in parallel. */
	max_dist = READ_ONCE(sched_max_numa_distance);
	/*
	 * This code is called for each node, introducing N^2 complexity,
	 * which should be ok given the number of nodes rarely exceeds 8.
	 */
	for_each_online_node(node) {
		unsigned long faults;
		int dist = node_distance(nid, node);

		/*
		 * The furthest away nodes in the system are not interesting
		 * for placement; nid was already counted.
		 */
		if (dist >= max_dist || node == nid)
			continue;

		/*
		 * On systems with a backplane NUMA topology, compare groups
		 * of nodes, and move tasks towards the group with the most
		 * memory accesses. When comparing two nodes at distance
		 * "hoplimit", only nodes closer by than "hoplimit" are part
		 * of each group. Skip other nodes.
		 */
		if (sched_numa_topology_type == NUMA_BACKPLANE && dist >= lim_dist)
			continue;

		/* Add up the faults from nearby nodes. */
		if (task)
			faults = task_faults(p, node);
		else
			faults = group_faults(p, node);

		/*
		 * On systems with a glueless mesh NUMA topology, there are
		 * no fixed "groups of nodes". Instead, nodes that are not
		 * directly connected bounce traffic through intermediate
		 * nodes; a numa_group can occupy any set of nodes.
		 * The further away a node is, the less the faults count.
		 * This seems to result in good task placement.
		 */
		if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
			faults *= (max_dist - dist);
			faults /= (max_dist - LOCAL_DISTANCE);
		}

		score += faults;
	}

	return score;
}
1691 
1692 /*
1693  * These return the fraction of accesses done by a particular task, or
1694  * task group, on a particular numa node.  The group weight is given a
1695  * larger multiplier, in order to group tasks together that are almost
1696  * evenly spread out between numa nodes.
1697  */
task_weight(struct task_struct * p,int nid,int dist)1698 static inline unsigned long task_weight(struct task_struct *p, int nid,
1699 					int dist)
1700 {
1701 	unsigned long faults, total_faults;
1702 
1703 	if (!p->numa_faults)
1704 		return 0;
1705 
1706 	total_faults = p->total_numa_faults;
1707 
1708 	if (!total_faults)
1709 		return 0;
1710 
1711 	faults = task_faults(p, nid);
1712 	faults += score_nearby_nodes(p, nid, dist, true);
1713 
1714 	return 1000 * faults / total_faults;
1715 }
1716 
group_weight(struct task_struct * p,int nid,int dist)1717 static inline unsigned long group_weight(struct task_struct *p, int nid,
1718 					 int dist)
1719 {
1720 	struct numa_group *ng = deref_task_numa_group(p);
1721 	unsigned long faults, total_faults;
1722 
1723 	if (!ng)
1724 		return 0;
1725 
1726 	total_faults = ng->total_faults;
1727 
1728 	if (!total_faults)
1729 		return 0;
1730 
1731 	faults = group_faults(p, nid);
1732 	faults += score_nearby_nodes(p, nid, dist, false);
1733 
1734 	return 1000 * faults / total_faults;
1735 }
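/*
 * Both weights are per-mille fractions on a 0..1000 scale. As a
 * hypothetical example: a task with 240 of its 800 recorded faults on
 * node N, plus a further 60 points from score_nearby_nodes(), gets
 * task_weight(N) = 1000 * (240 + 60) / 800 = 375, i.e. roughly 37.5%
 * of its memory accesses are attributed to node N and its neighbours.
 */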
1736 
1737 /*
1738  * If memory tiering mode is enabled, cpupid of slow memory page is
1739  * used to record scan time instead of CPU and PID.  When tiering mode
1740  * is disabled at run time, the scan time (in cpupid) will be
1741  * interpreted as CPU and PID.  So the CPU needs to be checked to
1742  * avoid out-of-bounds array access.
1743  */
1744 static inline bool cpupid_valid(int cpupid)
1745 {
1746 	return cpupid_to_cpu(cpupid) < nr_cpu_ids;
1747 }
1748 
1749 /*
1750  * For memory tiering mode, if the fast memory node has enough free
1751  * pages (above the "enough" watermark defined here), then to take
1752  * full advantage of the fast memory capacity, all recently accessed
1753  * slow memory pages are migrated to it without consulting the hot
1754  * threshold.
1755  */
1756 static bool pgdat_free_space_enough(struct pglist_data *pgdat)
1757 {
1758 	int z;
1759 	unsigned long enough_wmark;
1760 
1761 	enough_wmark = max(1UL * 1024 * 1024 * 1024 >> PAGE_SHIFT,
1762 			   pgdat->node_present_pages >> 4);
1763 	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1764 		struct zone *zone = pgdat->node_zones + z;
1765 
1766 		if (!populated_zone(zone))
1767 			continue;
1768 
1769 		if (zone_watermark_ok(zone, 0,
1770 				      wmark_pages(zone, WMARK_PROMO) + enough_wmark,
1771 				      ZONE_MOVABLE, 0))
1772 			return true;
1773 	}
1774 	return false;
1775 }
1776 
1777 /*
1778  * For memory tiering mode, when page tables are scanned, the scan
1779  * time is recorded in struct page in addition to making the slow
1780  * memory page PROT_NONE.  So when the page is accessed, the hint
1781  * page fault handler calculates the hint page fault latency
1782  * via,
1783  *
1784  *	hint page fault latency = hint page fault time - scan time
1785  *
1786  * The smaller the hint page fault latency, the higher the possibility
1787  * for the page to be hot.
1788  */
1789 static int numa_hint_fault_latency(struct folio *folio)
1790 {
1791 	int last_time, time;
1792 
1793 	time = jiffies_to_msecs(jiffies);
1794 	last_time = xchg_page_access_time(&folio->page, time);
1795 
1796 	return (time - last_time) & PAGE_ACCESS_TIME_MASK;
1797 }
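/*
 * The timestamps are truncated to PAGE_ACCESS_TIME_MASK bits, so the
 * masked subtraction stays correct across counter wrap. E.g. assuming
 * a hypothetical 16-bit field, time = 10 and last_time = 65530 give
 * (10 - 65530) & 0xffff = 16 msec of latency, as expected.
 */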
1798 
1799 /*
1800  * For memory tiering mode, too high promotion/demotion throughput may
1801  * hurt application latency.  So we provide a mechanism to rate limit
1802  * the number of pages that are tried to be promoted.
1803  */
1804 static bool numa_promotion_rate_limit(struct pglist_data *pgdat,
1805 				      unsigned long rate_limit, int nr)
1806 {
1807 	unsigned long nr_cand;
1808 	unsigned int now, start;
1809 
1810 	now = jiffies_to_msecs(jiffies);
1811 	mod_node_page_state(pgdat, PGPROMOTE_CANDIDATE, nr);
1812 	nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
1813 	start = pgdat->nbp_rl_start;
1814 	if (now - start > MSEC_PER_SEC &&
1815 	    cmpxchg(&pgdat->nbp_rl_start, start, now) == start)
1816 		pgdat->nbp_rl_nr_cand = nr_cand;
1817 	if (nr_cand - pgdat->nbp_rl_nr_cand >= rate_limit)
1818 		return true;
1819 	return false;
1820 }
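/*
 * Example with hypothetical numbers: at rate_limit = 65536 pages/sec
 * (256 MB/sec with 4KB pages), once more than 65536 candidate pages
 * have been counted since the start of the current 1-second window,
 * promotion is refused until the window rolls over. The cmpxchg()
 * guarantees a single CPU resets the window, keeping this lock-free.
 */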
1821 
1822 #define NUMA_MIGRATION_ADJUST_STEPS	16
1823 
1824 static void numa_promotion_adjust_threshold(struct pglist_data *pgdat,
1825 					    unsigned long rate_limit,
1826 					    unsigned int ref_th)
1827 {
1828 	unsigned int now, start, th_period, unit_th, th;
1829 	unsigned long nr_cand, ref_cand, diff_cand;
1830 
1831 	now = jiffies_to_msecs(jiffies);
1832 	th_period = sysctl_numa_balancing_scan_period_max;
1833 	start = pgdat->nbp_th_start;
1834 	if (now - start > th_period &&
1835 	    cmpxchg(&pgdat->nbp_th_start, start, now) == start) {
1836 		ref_cand = rate_limit *
1837 			sysctl_numa_balancing_scan_period_max / MSEC_PER_SEC;
1838 		nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
1839 		diff_cand = nr_cand - pgdat->nbp_th_nr_cand;
1840 		unit_th = ref_th * 2 / NUMA_MIGRATION_ADJUST_STEPS;
1841 		th = pgdat->nbp_threshold ? : ref_th;
1842 		if (diff_cand > ref_cand * 11 / 10)
1843 			th = max(th - unit_th, unit_th);
1844 		else if (diff_cand < ref_cand * 9 / 10)
1845 			th = min(th + unit_th, ref_th * 2);
1846 		pgdat->nbp_th_nr_cand = nr_cand;
1847 		pgdat->nbp_threshold = th;
1848 	}
1849 }
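/*
 * Example of the feedback above, with a hypothetical ref_th of 1000
 * msec: the step is unit_th = 1000 * 2 / 16 = 125 msec. If the last
 * period produced more than 110% of the target candidates, the hot
 * threshold drops by 125 msec so pages must be hotter to qualify;
 * below 90% it rises by 125 msec, capped at 2 * ref_th. Inside the
 * 90%..110% band the threshold is left alone to avoid oscillation.
 */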
1850 
1851 bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio,
1852 				int src_nid, int dst_cpu)
1853 {
1854 	struct numa_group *ng = deref_curr_numa_group(p);
1855 	int dst_nid = cpu_to_node(dst_cpu);
1856 	int last_cpupid, this_cpupid;
1857 
1858 	/*
1859 	 * The pages in slow memory node should be migrated according
1860 	 * to hot/cold instead of private/shared.
1861 	 */
1862 	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
1863 	    !node_is_toptier(src_nid)) {
1864 		struct pglist_data *pgdat;
1865 		unsigned long rate_limit;
1866 		unsigned int latency, th, def_th;
1867 
1868 		pgdat = NODE_DATA(dst_nid);
1869 		if (pgdat_free_space_enough(pgdat)) {
1870 			/* workload changed, reset hot threshold */
1871 			pgdat->nbp_threshold = 0;
1872 			return true;
1873 		}
1874 
1875 		def_th = sysctl_numa_balancing_hot_threshold;
1876 		rate_limit = sysctl_numa_balancing_promote_rate_limit << \
1877 			(20 - PAGE_SHIFT);
1878 		numa_promotion_adjust_threshold(pgdat, rate_limit, def_th);
1879 
1880 		th = pgdat->nbp_threshold ? : def_th;
1881 		latency = numa_hint_fault_latency(folio);
1882 		if (latency >= th)
1883 			return false;
1884 
1885 		return !numa_promotion_rate_limit(pgdat, rate_limit,
1886 						  folio_nr_pages(folio));
1887 	}
1888 
1889 	this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
1890 	last_cpupid = page_cpupid_xchg_last(&folio->page, this_cpupid);
1891 
1892 	if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
1893 	    !node_is_toptier(src_nid) && !cpupid_valid(last_cpupid))
1894 		return false;
1895 
1896 	/*
1897 	 * Allow first faults or private faults to migrate immediately early in
1898 	 * the lifetime of a task. The magic number 4 is based on waiting for
1899 	 * two full passes of the "multi-stage node selection" test that is
1900 	 * executed below.
1901 	 */
1902 	if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) &&
1903 	    (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
1904 		return true;
1905 
1906 	/*
1907 	 * Multi-stage node selection is used in conjunction with a periodic
1908 	 * migration fault to build a temporal task<->page relation. By using
1909 	 * a two-stage filter we remove short/unlikely relations.
1910 	 *
1911 	 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
1912 	 * a task's usage of a particular page (n_p) per total usage of this
1913 	 * page (n_t) (in a given time-span) to a probability.
1914 	 *
1915 	 * Our periodic faults will sample this probability and getting the
1916 	 * same result twice in a row, given these samples are fully
1917 	 * independent, is then given by P(n)^2, provided our sample period
1918 	 * is sufficiently short compared to the usage pattern.
1919 	 *
1920	 * This quadratic squishes small probabilities, making it less likely we
1921 	 * act on an unlikely task<->page relation.
1922 	 */
1923 	if (!cpupid_pid_unset(last_cpupid) &&
1924 				cpupid_to_nid(last_cpupid) != dst_nid)
1925 		return false;
1926 
1927 	/* Always allow migrate on private faults */
1928 	if (cpupid_match_pid(p, last_cpupid))
1929 		return true;
1930 
1931 	/* A shared fault, but p->numa_group has not been set up yet. */
1932 	if (!ng)
1933 		return true;
1934 
1935 	/*
1936 	 * Destination node is much more heavily used than the source
1937 	 * node? Allow migration.
1938 	 */
1939 	if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
1940 					ACTIVE_NODE_FRACTION)
1941 		return true;
1942 
1943 	/*
1944 	 * Distribute memory according to CPU & memory use on each node,
1945 	 * with 3/4 hysteresis to avoid unnecessary memory migrations:
1946 	 *
1947 	 * faults_cpu(dst)   3   faults_cpu(src)
1948 	 * --------------- * - > ---------------
1949 	 * faults_mem(dst)   4   faults_mem(src)
1950 	 */
1951 	return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
1952 	       group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
1953 }
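/*
 * Worked example of the 3/4 hysteresis above, with hypothetical fault
 * counts: faults_cpu(dst) = 40, faults_mem(src) = 10, faults_cpu(src)
 * = 20 and faults_mem(dst) = 10 give 40 * 10 * 3 = 1200 > 20 * 10 * 4
 * = 800, so the page is migrated. With the CPU usage reversed (20 on
 * dst, 40 on src) the test reads 600 > 1600, which fails, and the
 * page stays where it is.
 */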
1954 
1955 /*
1956  * 'numa_type' describes the node at the moment of load balancing.
1957  */
1958 enum numa_type {
1959 	/* The node has spare capacity that can be used to run more tasks.  */
1960 	node_has_spare = 0,
1961 	/*
1962 	 * The node is fully used and the tasks don't compete for more CPU
1963 	 * cycles. Nevertheless, some tasks might wait before running.
1964 	 */
1965 	node_fully_busy,
1966 	/*
1967 	 * The node is overloaded and can't provide expected CPU cycles to all
1968 	 * tasks.
1969 	 */
1970 	node_overloaded
1971 };
1972 
1973 /* Cached statistics for all CPUs within a node */
1974 struct numa_stats {
1975 	unsigned long load;
1976 	unsigned long runnable;
1977 	unsigned long util;
1978 	/* Total compute capacity of CPUs on a node */
1979 	unsigned long compute_capacity;
1980 	unsigned int nr_running;
1981 	unsigned int weight;
1982 	enum numa_type node_type;
1983 	int idle_cpu;
1984 };
1985 
1986 struct task_numa_env {
1987 	struct task_struct *p;
1988 
1989 	int src_cpu, src_nid;
1990 	int dst_cpu, dst_nid;
1991 	int imb_numa_nr;
1992 
1993 	struct numa_stats src_stats, dst_stats;
1994 
1995 	int imbalance_pct;
1996 	int dist;
1997 
1998 	struct task_struct *best_task;
1999 	long best_imp;
2000 	int best_cpu;
2001 };
2002 
2003 static unsigned long cpu_load(struct rq *rq);
2004 static unsigned long cpu_runnable(struct rq *rq);
2005 
2006 static inline enum
2007 numa_type numa_classify(unsigned int imbalance_pct,
2008 			 struct numa_stats *ns)
2009 {
2010 	if ((ns->nr_running > ns->weight) &&
2011 	    (((ns->compute_capacity * 100) < (ns->util * imbalance_pct)) ||
2012 	     ((ns->compute_capacity * imbalance_pct) < (ns->runnable * 100))))
2013 		return node_overloaded;
2014 
2015 	if ((ns->nr_running < ns->weight) ||
2016 	    (((ns->compute_capacity * 100) > (ns->util * imbalance_pct)) &&
2017 	     ((ns->compute_capacity * imbalance_pct) > (ns->runnable * 100))))
2018 		return node_has_spare;
2019 
2020 	return node_fully_busy;
2021 }
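/*
 * Example with hypothetical stats and imbalance_pct = 112: a node
 * whose CPUs sum to compute_capacity = 1024 is classified overloaded
 * once more tasks run than CPUs exist and either util exceeds
 * 1024 * 100 / 112 ~= 914 or runnable exceeds 1024 * 112 / 100 ~=
 * 1146; with fewer tasks than CPUs it always has spare capacity.
 */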
2022 
2023 #ifdef CONFIG_SCHED_SMT
2024 /* Forward declarations of select_idle_sibling helpers */
2025 static inline bool test_idle_cores(int cpu);
2026 static inline int numa_idle_core(int idle_core, int cpu)
2027 {
2028 	if (!static_branch_likely(&sched_smt_present) ||
2029 	    idle_core >= 0 || !test_idle_cores(cpu))
2030 		return idle_core;
2031 
2032 	/*
2033 	 * Prefer cores instead of packing HT siblings
2034 	 * and triggering future load balancing.
2035 	 */
2036 	if (is_core_idle(cpu))
2037 		idle_core = cpu;
2038 
2039 	return idle_core;
2040 }
2041 #else
2042 static inline int numa_idle_core(int idle_core, int cpu)
2043 {
2044 	return idle_core;
2045 }
2046 #endif
2047 
2048 /*
2049  * Gather all necessary information to make NUMA balancing placement
2050  * decisions that are compatible with the standard load balancer. This
2051  * borrows code and logic from update_sg_lb_stats but sharing a
2052  * common implementation is impractical.
2053  */
2054 static void update_numa_stats(struct task_numa_env *env,
2055 			      struct numa_stats *ns, int nid,
2056 			      bool find_idle)
2057 {
2058 	int cpu, idle_core = -1;
2059 
2060 	memset(ns, 0, sizeof(*ns));
2061 	ns->idle_cpu = -1;
2062 
2063 	rcu_read_lock();
2064 	for_each_cpu(cpu, cpumask_of_node(nid)) {
2065 		struct rq *rq = cpu_rq(cpu);
2066 
2067 		ns->load += cpu_load(rq);
2068 		ns->runnable += cpu_runnable(rq);
2069 		ns->util += cpu_util_cfs(cpu);
2070 		ns->nr_running += rq->cfs.h_nr_running;
2071 		ns->compute_capacity += capacity_of(cpu);
2072 
2073 		if (find_idle && idle_core < 0 && !rq->nr_running && idle_cpu(cpu)) {
2074 			if (READ_ONCE(rq->numa_migrate_on) ||
2075 			    !cpumask_test_cpu(cpu, env->p->cpus_ptr))
2076 				continue;
2077 
2078 			if (ns->idle_cpu == -1)
2079 				ns->idle_cpu = cpu;
2080 
2081 			idle_core = numa_idle_core(idle_core, cpu);
2082 		}
2083 	}
2084 	rcu_read_unlock();
2085 
2086 	ns->weight = cpumask_weight(cpumask_of_node(nid));
2087 
2088 	ns->node_type = numa_classify(env->imbalance_pct, ns);
2089 
2090 	if (idle_core >= 0)
2091 		ns->idle_cpu = idle_core;
2092 }
2093 
2094 static void task_numa_assign(struct task_numa_env *env,
2095 			     struct task_struct *p, long imp)
2096 {
2097 	struct rq *rq = cpu_rq(env->dst_cpu);
2098 
2099	/* Check if the run-queue is part of an active NUMA balance. */
2100 	if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) {
2101 		int cpu;
2102 		int start = env->dst_cpu;
2103 
2104 		/* Find alternative idle CPU. */
2105 		for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start + 1) {
2106 			if (cpu == env->best_cpu || !idle_cpu(cpu) ||
2107 			    !cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
2108 				continue;
2109 			}
2110 
2111 			env->dst_cpu = cpu;
2112 			rq = cpu_rq(env->dst_cpu);
2113 			if (!xchg(&rq->numa_migrate_on, 1))
2114 				goto assign;
2115 		}
2116 
2117 		/* Failed to find an alternative idle CPU */
2118 		return;
2119 	}
2120 
2121 assign:
2122 	/*
2123	 * Clear the previous best_cpu/rq numa-migrate flag, since the task
2124	 * has now found a better CPU to move/swap to.
2125 	 */
2126 	if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) {
2127 		rq = cpu_rq(env->best_cpu);
2128 		WRITE_ONCE(rq->numa_migrate_on, 0);
2129 	}
2130 
2131 	if (env->best_task)
2132 		put_task_struct(env->best_task);
2133 	if (p)
2134 		get_task_struct(p);
2135 
2136 	env->best_task = p;
2137 	env->best_imp = imp;
2138 	env->best_cpu = env->dst_cpu;
2139 }
2140 
2141 static bool load_too_imbalanced(long src_load, long dst_load,
2142 				struct task_numa_env *env)
2143 {
2144 	long imb, old_imb;
2145 	long orig_src_load, orig_dst_load;
2146 	long src_capacity, dst_capacity;
2147 
2148 	/*
2149 	 * The load is corrected for the CPU capacity available on each node.
2150 	 *
2151 	 * src_load        dst_load
2152 	 * ------------ vs ---------
2153 	 * src_capacity    dst_capacity
2154 	 */
2155 	src_capacity = env->src_stats.compute_capacity;
2156 	dst_capacity = env->dst_stats.compute_capacity;
2157 
2158 	imb = abs(dst_load * src_capacity - src_load * dst_capacity);
2159 
2160 	orig_src_load = env->src_stats.load;
2161 	orig_dst_load = env->dst_stats.load;
2162 
2163 	old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);
2164 
2165 	/* Would this change make things worse? */
2166 	return (imb > old_imb);
2167 }
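/*
 * Worked example with hypothetical loads and equal capacities of
 * 1024: original loads of 600 (src) and 200 (dst) give old_imb =
 * |200 - 600| * 1024. Moving a task of load 100 yields 500/300 and
 * imb = |300 - 500| * 1024, which is smaller, so the move is allowed;
 * a move that produced 700/100 would grow the imbalance and be
 * rejected.
 */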
2168 
2169 /*
2170  * Maximum NUMA importance can be 1998 (2*999);
2171  * SMALLIMP @ 30 would be close to 1998/64.
2172  * Used to deter task migration.
2173  */
2174 #define SMALLIMP	30
2175 
2176 /*
2177  * This checks if the overall compute and NUMA accesses of the system would
2178  * be improved if the source task were migrated to the target dst_cpu,
2179  * taking into account that it might be best to exchange the task
2180  * running on dst_cpu with the source task.
2181  */
2182 static bool task_numa_compare(struct task_numa_env *env,
2183 			      long taskimp, long groupimp, bool maymove)
2184 {
2185 	struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
2186 	struct rq *dst_rq = cpu_rq(env->dst_cpu);
2187 	long imp = p_ng ? groupimp : taskimp;
2188 	struct task_struct *cur;
2189 	long src_load, dst_load;
2190 	int dist = env->dist;
2191 	long moveimp = imp;
2192 	long load;
2193 	bool stopsearch = false;
2194 
2195 	if (READ_ONCE(dst_rq->numa_migrate_on))
2196 		return false;
2197 
2198 	rcu_read_lock();
2199 	cur = rcu_dereference(dst_rq->curr);
2200 	if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
2201 		cur = NULL;
2202 
2203 	/*
2204 	 * Because we have preemption enabled we can get migrated around and
2205	 * end up trying to select ourselves (current == env->p) as a swap candidate.
2206 	 */
2207 	if (cur == env->p) {
2208 		stopsearch = true;
2209 		goto unlock;
2210 	}
2211 
2212 	if (!cur) {
2213 		if (maymove && moveimp >= env->best_imp)
2214 			goto assign;
2215 		else
2216 			goto unlock;
2217 	}
2218 
2219	/* Skip this swap candidate if it cannot move to the source CPU. */
2220 	if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
2221 		goto unlock;
2222 
2223 	/*
2224 	 * Skip this swap candidate if it is not moving to its preferred
2225 	 * node and the best task is.
2226 	 */
2227 	if (env->best_task &&
2228 	    env->best_task->numa_preferred_nid == env->src_nid &&
2229 	    cur->numa_preferred_nid != env->src_nid) {
2230 		goto unlock;
2231 	}
2232 
2233 	/*
2234 	 * "imp" is the fault differential for the source task between the
2235 	 * source and destination node. Calculate the total differential for
2236 	 * the source task and potential destination task. The more negative
2237 	 * the value is, the more remote accesses that would be expected to
2238 	 * be incurred if the tasks were swapped.
2239 	 *
2240 	 * If dst and source tasks are in the same NUMA group, or not
2241 	 * in any group then look only at task weights.
2242 	 */
2243 	cur_ng = rcu_dereference(cur->numa_group);
2244 	if (cur_ng == p_ng) {
2245 		/*
2246 		 * Do not swap within a group or between tasks that have
2247 		 * no group if there is spare capacity. Swapping does
2248 		 * not address the load imbalance and helps one task at
2249 		 * the cost of punishing another.
2250 		 */
2251 		if (env->dst_stats.node_type == node_has_spare)
2252 			goto unlock;
2253 
2254 		imp = taskimp + task_weight(cur, env->src_nid, dist) -
2255 		      task_weight(cur, env->dst_nid, dist);
2256 		/*
2257 		 * Add some hysteresis to prevent swapping the
2258 		 * tasks within a group over tiny differences.
2259 		 */
2260 		if (cur_ng)
2261 			imp -= imp / 16;
2262 	} else {
2263 		/*
2264 		 * Compare the group weights. If a task is all by itself
2265 		 * (not part of a group), use the task weight instead.
2266 		 */
2267 		if (cur_ng && p_ng)
2268 			imp += group_weight(cur, env->src_nid, dist) -
2269 			       group_weight(cur, env->dst_nid, dist);
2270 		else
2271 			imp += task_weight(cur, env->src_nid, dist) -
2272 			       task_weight(cur, env->dst_nid, dist);
2273 	}
2274 
2275 	/* Discourage picking a task already on its preferred node */
2276 	if (cur->numa_preferred_nid == env->dst_nid)
2277 		imp -= imp / 16;
2278 
2279 	/*
2280 	 * Encourage picking a task that moves to its preferred node.
2281	 * This potentially makes imp larger than its maximum of
2282 	 * 1998 (see SMALLIMP and task_weight for why) but in this
2283 	 * case, it does not matter.
2284 	 */
2285 	if (cur->numa_preferred_nid == env->src_nid)
2286 		imp += imp / 8;
2287 
2288 	if (maymove && moveimp > imp && moveimp > env->best_imp) {
2289 		imp = moveimp;
2290 		cur = NULL;
2291 		goto assign;
2292 	}
2293 
2294 	/*
2295 	 * Prefer swapping with a task moving to its preferred node over a
2296 	 * task that is not.
2297 	 */
2298 	if (env->best_task && cur->numa_preferred_nid == env->src_nid &&
2299 	    env->best_task->numa_preferred_nid != env->src_nid) {
2300 		goto assign;
2301 	}
2302 
2303 	/*
2304 	 * If the NUMA importance is less than SMALLIMP,
2305 	 * task migration might only result in ping pong
2306 	 * of tasks and also hurt performance due to cache
2307 	 * misses.
2308 	 */
2309 	if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
2310 		goto unlock;
2311 
2312 	/*
2313 	 * In the overloaded case, try and keep the load balanced.
2314 	 */
2315 	load = task_h_load(env->p) - task_h_load(cur);
2316 	if (!load)
2317 		goto assign;
2318 
2319 	dst_load = env->dst_stats.load + load;
2320 	src_load = env->src_stats.load - load;
2321 
2322 	if (load_too_imbalanced(src_load, dst_load, env))
2323 		goto unlock;
2324 
2325 assign:
2326 	/* Evaluate an idle CPU for a task numa move. */
2327 	if (!cur) {
2328 		int cpu = env->dst_stats.idle_cpu;
2329 
2330 		/* Nothing cached so current CPU went idle since the search. */
2331 		if (cpu < 0)
2332 			cpu = env->dst_cpu;
2333 
2334 		/*
2335 		 * If the CPU is no longer truly idle and the previous best CPU
2336 		 * is, keep using it.
2337 		 */
2338 		if (!idle_cpu(cpu) && env->best_cpu >= 0 &&
2339 		    idle_cpu(env->best_cpu)) {
2340 			cpu = env->best_cpu;
2341 		}
2342 
2343 		env->dst_cpu = cpu;
2344 	}
2345 
2346 	task_numa_assign(env, cur, imp);
2347 
2348 	/*
2349 	 * If a move to idle is allowed because there is capacity or load
2350 	 * balance improves then stop the search. While a better swap
2351 	 * candidate may exist, a search is not free.
2352 	 */
2353 	if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu))
2354 		stopsearch = true;
2355 
2356 	/*
2357 	 * If a swap candidate must be identified and the current best task
2358	 * moves to its preferred node then stop the search.
2359 	 */
2360 	if (!maymove && env->best_task &&
2361 	    env->best_task->numa_preferred_nid == env->src_nid) {
2362 		stopsearch = true;
2363 	}
2364 unlock:
2365 	rcu_read_unlock();
2366 
2367 	return stopsearch;
2368 }
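/*
 * Example of the importance arithmetic above, with hypothetical
 * weights: taskimp = 200 and a same-group swap candidate with
 * task_weight 500 on the source node and 300 on the destination give
 * imp = 200 + (500 - 300) = 400; the in-group hysteresis
 * (imp -= imp / 16) trims that to 375. The candidate is then accepted
 * only if 375 >= SMALLIMP and it exceeds best_imp by more than
 * SMALLIMP / 2.
 */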
2369 
2370 static void task_numa_find_cpu(struct task_numa_env *env,
2371 				long taskimp, long groupimp)
2372 {
2373 	bool maymove = false;
2374 	int cpu;
2375 
2376 	/*
2377 	 * If dst node has spare capacity, then check if there is an
2378 	 * imbalance that would be overruled by the load balancer.
2379 	 */
2380 	if (env->dst_stats.node_type == node_has_spare) {
2381 		unsigned int imbalance;
2382 		int src_running, dst_running;
2383 
2384 		/*
2385		 * Would movement cause an imbalance? Note that if src has
2386		 * more running tasks then the imbalance is ignored as the
2387		 * move improves the imbalance from the perspective of the
2388		 * CPU load balancer.
2389		 */
2390 		src_running = env->src_stats.nr_running - 1;
2391 		dst_running = env->dst_stats.nr_running + 1;
2392 		imbalance = max(0, dst_running - src_running);
2393 		imbalance = adjust_numa_imbalance(imbalance, dst_running,
2394 						  env->imb_numa_nr);
2395 
2396 		/* Use idle CPU if there is no imbalance */
2397 		if (!imbalance) {
2398 			maymove = true;
2399 			if (env->dst_stats.idle_cpu >= 0) {
2400 				env->dst_cpu = env->dst_stats.idle_cpu;
2401 				task_numa_assign(env, NULL, 0);
2402 				return;
2403 			}
2404 		}
2405 	} else {
2406 		long src_load, dst_load, load;
2407 		/*
2408		 * If the improvement from just moving env->p is better
2409 		 * than swapping tasks around, check if a move is possible.
2410 		 */
2411 		load = task_h_load(env->p);
2412 		dst_load = env->dst_stats.load + load;
2413 		src_load = env->src_stats.load - load;
2414 		maymove = !load_too_imbalanced(src_load, dst_load, env);
2415 	}
2416 
2417 	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
2418 		/* Skip this CPU if the source task cannot migrate */
2419 		if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
2420 			continue;
2421 
2422 		env->dst_cpu = cpu;
2423 		if (task_numa_compare(env, taskimp, groupimp, maymove))
2424 			break;
2425 	}
2426 }
2427 
2428 static int task_numa_migrate(struct task_struct *p)
2429 {
2430 	struct task_numa_env env = {
2431 		.p = p,
2432 
2433 		.src_cpu = task_cpu(p),
2434 		.src_nid = task_node(p),
2435 
2436 		.imbalance_pct = 112,
2437 
2438 		.best_task = NULL,
2439 		.best_imp = 0,
2440 		.best_cpu = -1,
2441 	};
2442 	unsigned long taskweight, groupweight;
2443 	struct sched_domain *sd;
2444 	long taskimp, groupimp;
2445 	struct numa_group *ng;
2446 	struct rq *best_rq;
2447 	int nid, ret, dist;
2448 
2449 	/*
2450 	 * Pick the lowest SD_NUMA domain, as that would have the smallest
2451 	 * imbalance and would be the first to start moving tasks about.
2452 	 *
2453 	 * And we want to avoid any moving of tasks about, as that would create
2454	 * random movement of tasks -- countering the numa conditions we're trying
2455 	 * to satisfy here.
2456 	 */
2457 	rcu_read_lock();
2458 	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
2459 	if (sd) {
2460 		env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
2461 		env.imb_numa_nr = sd->imb_numa_nr;
2462 	}
2463 	rcu_read_unlock();
2464 
2465 	/*
2466 	 * Cpusets can break the scheduler domain tree into smaller
2467 	 * balance domains, some of which do not cross NUMA boundaries.
2468 	 * Tasks that are "trapped" in such domains cannot be migrated
2469 	 * elsewhere, so there is no point in (re)trying.
2470 	 */
2471 	if (unlikely(!sd)) {
2472 		sched_setnuma(p, task_node(p));
2473 		return -EINVAL;
2474 	}
2475 
2476 	env.dst_nid = p->numa_preferred_nid;
2477 	dist = env.dist = node_distance(env.src_nid, env.dst_nid);
2478 	taskweight = task_weight(p, env.src_nid, dist);
2479 	groupweight = group_weight(p, env.src_nid, dist);
2480 	update_numa_stats(&env, &env.src_stats, env.src_nid, false);
2481 	taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
2482 	groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
2483 	update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
2484 
2485 	/* Try to find a spot on the preferred nid. */
2486 	task_numa_find_cpu(&env, taskimp, groupimp);
2487 
2488 	/*
2489 	 * Look at other nodes in these cases:
2490 	 * - there is no space available on the preferred_nid
2491 	 * - the task is part of a numa_group that is interleaved across
2492 	 *   multiple NUMA nodes; in order to better consolidate the group,
2493 	 *   we need to check other locations.
2494 	 */
2495 	ng = deref_curr_numa_group(p);
2496 	if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
2497 		for_each_node_state(nid, N_CPU) {
2498 			if (nid == env.src_nid || nid == p->numa_preferred_nid)
2499 				continue;
2500 
2501 			dist = node_distance(env.src_nid, env.dst_nid);
2502 			if (sched_numa_topology_type == NUMA_BACKPLANE &&
2503 						dist != env.dist) {
2504 				taskweight = task_weight(p, env.src_nid, dist);
2505 				groupweight = group_weight(p, env.src_nid, dist);
2506 			}
2507 
2508 			/* Only consider nodes where both task and groups benefit */
2509 			taskimp = task_weight(p, nid, dist) - taskweight;
2510 			groupimp = group_weight(p, nid, dist) - groupweight;
2511 			if (taskimp < 0 && groupimp < 0)
2512 				continue;
2513 
2514 			env.dist = dist;
2515 			env.dst_nid = nid;
2516 			update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
2517 			task_numa_find_cpu(&env, taskimp, groupimp);
2518 		}
2519 	}
2520 
2521 	/*
2522 	 * If the task is part of a workload that spans multiple NUMA nodes,
2523 	 * and is migrating into one of the workload's active nodes, remember
2524 	 * this node as the task's preferred numa node, so the workload can
2525 	 * settle down.
2526 	 * A task that migrated to a second choice node will be better off
2527 	 * trying for a better one later. Do not set the preferred node here.
2528 	 */
2529 	if (ng) {
2530 		if (env.best_cpu == -1)
2531 			nid = env.src_nid;
2532 		else
2533 			nid = cpu_to_node(env.best_cpu);
2534 
2535 		if (nid != p->numa_preferred_nid)
2536 			sched_setnuma(p, nid);
2537 	}
2538 
2539 	/* No better CPU than the current one was found. */
2540 	if (env.best_cpu == -1) {
2541 		trace_sched_stick_numa(p, env.src_cpu, NULL, -1);
2542 		return -EAGAIN;
2543 	}
2544 
2545 	best_rq = cpu_rq(env.best_cpu);
2546 	if (env.best_task == NULL) {
2547 		ret = migrate_task_to(p, env.best_cpu);
2548 		WRITE_ONCE(best_rq->numa_migrate_on, 0);
2549 		if (ret != 0)
2550 			trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu);
2551 		return ret;
2552 	}
2553 
2554 	ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
2555 	WRITE_ONCE(best_rq->numa_migrate_on, 0);
2556 
2557 	if (ret != 0)
2558 		trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu);
2559 	put_task_struct(env.best_task);
2560 	return ret;
2561 }
2562 
2563 /* Attempt to migrate a task to a CPU on the preferred node. */
2564 static void numa_migrate_preferred(struct task_struct *p)
2565 {
2566 	unsigned long interval = HZ;
2567 
2568 	/* This task has no NUMA fault statistics yet */
2569 	if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults))
2570 		return;
2571 
2572 	/* Periodically retry migrating the task to the preferred node */
2573 	interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
2574 	p->numa_migrate_retry = jiffies + interval;
2575 
2576 	/* Success if task is already running on preferred CPU */
2577 	if (task_node(p) == p->numa_preferred_nid)
2578 		return;
2579 
2580 	/* Otherwise, try migrate to a CPU on the preferred node */
2581 	task_numa_migrate(p);
2582 }
2583 
2584 /*
2585  * Find out how many nodes the workload is actively running on. Do this by
2586  * tracking the nodes from which NUMA hinting faults are triggered. This can
2587  * be different from the set of nodes where the workload's memory is currently
2588  * located.
2589  */
2590 static void numa_group_count_active_nodes(struct numa_group *numa_group)
2591 {
2592 	unsigned long faults, max_faults = 0;
2593 	int nid, active_nodes = 0;
2594 
2595 	for_each_node_state(nid, N_CPU) {
2596 		faults = group_faults_cpu(numa_group, nid);
2597 		if (faults > max_faults)
2598 			max_faults = faults;
2599 	}
2600 
2601 	for_each_node_state(nid, N_CPU) {
2602 		faults = group_faults_cpu(numa_group, nid);
2603 		if (faults * ACTIVE_NODE_FRACTION > max_faults)
2604 			active_nodes++;
2605 	}
2606 
2607 	numa_group->max_faults_cpu = max_faults;
2608 	numa_group->active_nodes = active_nodes;
2609 }
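/*
 * Example: a node counts as active when it receives more than
 * 1/ACTIVE_NODE_FRACTION of the hinting faults seen by the busiest
 * node. With max_faults = 900 and a fraction of 3, nodes with more
 * than 300 CPU faults are active; one with 250 is treated as
 * spillover rather than part of the workload's working set.
 */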
2610 
2611 /*
2612  * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
2613  * increments. The more local the fault statistics are, the higher the scan
2614  * period will be for the next scan window. If local/(local+remote) ratio is
2615  * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
2616  * the scan period will decrease. Aim for 70% local accesses.
2617  */
2618 #define NUMA_PERIOD_SLOTS 10
2619 #define NUMA_PERIOD_THRESHOLD 7
2620 
2621 /*
2622  * Increase the scan period (slow down scanning) if the majority of
2623  * our memory is already on our local node, or if the majority of
2624  * the page accesses are shared with other processes.
2625  * Otherwise, decrease the scan period.
2626  */
2627 static void update_task_scan_period(struct task_struct *p,
2628 			unsigned long shared, unsigned long private)
2629 {
2630 	unsigned int period_slot;
2631 	int lr_ratio, ps_ratio;
2632 	int diff;
2633 
2634 	unsigned long remote = p->numa_faults_locality[0];
2635 	unsigned long local = p->numa_faults_locality[1];
2636 
2637 	/*
2638	 * If there were no recorded hinting faults then either the task is
2639	 * completely idle or all activity is in areas that are not of interest
2640	 * to automatic numa balancing. Related to that, if there were failed
2641	 * migrations then it implies we are migrating too quickly or the local
2642	 * node is overloaded. In either case, scan slower.
2643 	 */
2644 	if (local + shared == 0 || p->numa_faults_locality[2]) {
2645 		p->numa_scan_period = min(p->numa_scan_period_max,
2646 			p->numa_scan_period << 1);
2647 
2648 		p->mm->numa_next_scan = jiffies +
2649 			msecs_to_jiffies(p->numa_scan_period);
2650 
2651 		return;
2652 	}
2653 
2654 	/*
2655 	 * Prepare to scale scan period relative to the current period.
2656 	 *	 == NUMA_PERIOD_THRESHOLD scan period stays the same
2657 	 *       <  NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
2658 	 *	 >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
2659 	 */
2660 	period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
2661 	lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
2662 	ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared);
2663 
2664 	if (ps_ratio >= NUMA_PERIOD_THRESHOLD) {
2665 		/*
2666 		 * Most memory accesses are local. There is no need to
2667 		 * do fast NUMA scanning, since memory is already local.
2668 		 */
2669 		int slot = ps_ratio - NUMA_PERIOD_THRESHOLD;
2670 		if (!slot)
2671 			slot = 1;
2672 		diff = slot * period_slot;
2673 	} else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) {
2674 		/*
2675 		 * Most memory accesses are shared with other tasks.
2676 		 * There is no point in continuing fast NUMA scanning,
2677 		 * since other tasks may just move the memory elsewhere.
2678 		 */
2679 		int slot = lr_ratio - NUMA_PERIOD_THRESHOLD;
2680 		if (!slot)
2681 			slot = 1;
2682 		diff = slot * period_slot;
2683 	} else {
2684 		/*
2685 		 * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS,
2686 		 * yet they are not on the local NUMA node. Speed up
2687 		 * NUMA scanning to get the memory moved over.
2688 		 */
2689 		int ratio = max(lr_ratio, ps_ratio);
2690 		diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
2691 	}
2692 
2693 	p->numa_scan_period = clamp(p->numa_scan_period + diff,
2694 			task_scan_min(p), task_scan_max(p));
2695 	memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2696 }
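/*
 * Worked example with hypothetical fault counts: local = 700,
 * remote = 300, private = 800, shared = 200 give lr_ratio = 7 and
 * ps_ratio = 8. ps_ratio clears the threshold, so slot = 8 - 7 = 1
 * and the period grows by one period_slot (scan slower; accesses are
 * mostly local and private). With local = 200 and private = 300 the
 * ratios fall to 2 and 3, and diff = -(7 - 3) * period_slot speeds
 * scanning back up.
 */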
2697 
2698 /*
2699  * Get the fraction of time the task has been running since the last
2700  * NUMA placement cycle. The scheduler keeps similar statistics, but
2701  * decays those on a 32ms period, which is orders of magnitude off
2702  * from the dozens-of-seconds NUMA balancing period. Use the scheduler
2703  * stats only if the task is so new there are no NUMA statistics yet.
2704  */
2705 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
2706 {
2707 	u64 runtime, delta, now;
2708 	/* Use the start of this time slice to avoid calculations. */
2709 	now = p->se.exec_start;
2710 	runtime = p->se.sum_exec_runtime;
2711 
2712 	if (p->last_task_numa_placement) {
2713 		delta = runtime - p->last_sum_exec_runtime;
2714 		*period = now - p->last_task_numa_placement;
2715 
2716 		/* Avoid time going backwards, prevent potential divide error: */
2717 		if (unlikely((s64)*period < 0))
2718 			*period = 0;
2719 	} else {
2720 		delta = p->se.avg.load_sum;
2721 		*period = LOAD_AVG_MAX;
2722 	}
2723 
2724 	p->last_sum_exec_runtime = runtime;
2725 	p->last_task_numa_placement = now;
2726 
2727 	return delta;
2728 }
2729 
2730 /*
2731  * Determine the preferred nid for a task in a numa_group. This needs to
2732  * be done in a way that produces consistent results with group_weight,
2733  * otherwise workloads might not converge.
2734  */
2735 static int preferred_group_nid(struct task_struct *p, int nid)
2736 {
2737 	nodemask_t nodes;
2738 	int dist;
2739 
2740 	/* Direct connections between all NUMA nodes. */
2741 	if (sched_numa_topology_type == NUMA_DIRECT)
2742 		return nid;
2743 
2744 	/*
2745 	 * On a system with glueless mesh NUMA topology, group_weight
2746 	 * scores nodes according to the number of NUMA hinting faults on
2747 	 * both the node itself, and on nearby nodes.
2748 	 */
2749 	if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
2750 		unsigned long score, max_score = 0;
2751 		int node, max_node = nid;
2752 
2753 		dist = sched_max_numa_distance;
2754 
2755 		for_each_node_state(node, N_CPU) {
2756 			score = group_weight(p, node, dist);
2757 			if (score > max_score) {
2758 				max_score = score;
2759 				max_node = node;
2760 			}
2761 		}
2762 		return max_node;
2763 	}
2764 
2765 	/*
2766 	 * Finding the preferred nid in a system with NUMA backplane
2767 	 * interconnect topology is more involved. The goal is to locate
2768 	 * tasks from numa_groups near each other in the system, and
2769 	 * untangle workloads from different sides of the system. This requires
2770 	 * searching down the hierarchy of node groups, recursively searching
2771 	 * inside the highest scoring group of nodes. The nodemask tricks
2772 	 * keep the complexity of the search down.
2773 	 */
2774 	nodes = node_states[N_CPU];
2775 	for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
2776 		unsigned long max_faults = 0;
2777 		nodemask_t max_group = NODE_MASK_NONE;
2778 		int a, b;
2779 
2780 		/* Are there nodes at this distance from each other? */
2781 		if (!find_numa_distance(dist))
2782 			continue;
2783 
2784 		for_each_node_mask(a, nodes) {
2785 			unsigned long faults = 0;
2786 			nodemask_t this_group;
2787 			nodes_clear(this_group);
2788 
2789 			/* Sum group's NUMA faults; includes a==b case. */
2790 			for_each_node_mask(b, nodes) {
2791 				if (node_distance(a, b) < dist) {
2792 					faults += group_faults(p, b);
2793 					node_set(b, this_group);
2794 					node_clear(b, nodes);
2795 				}
2796 			}
2797 
2798 			/* Remember the top group. */
2799 			if (faults > max_faults) {
2800 				max_faults = faults;
2801 				max_group = this_group;
2802 				/*
2803 				 * subtle: at the smallest distance there is
2804 				 * just one node left in each "group", the
2805 				 * winner is the preferred nid.
2806 				 */
2807 				nid = a;
2808 			}
2809 		}
2810 		/* Next round, evaluate the nodes within max_group. */
2811 		if (!max_faults)
2812 			break;
2813 		nodes = max_group;
2814 	}
2815 	return nid;
2816 }
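/*
 * Example of the backplane search above, on a hypothetical 8-node
 * machine built from two 4-node boards: the first matching distance
 * groups the nodes board by board, and the board with more group
 * faults survives in "nodes"; later, smaller distances recurse within
 * that board until a single node remains, which becomes the preferred
 * nid.
 */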
2817 
2818 static void task_numa_placement(struct task_struct *p)
2819 {
2820 	int seq, nid, max_nid = NUMA_NO_NODE;
2821 	unsigned long max_faults = 0;
2822 	unsigned long fault_types[2] = { 0, 0 };
2823 	unsigned long total_faults;
2824 	u64 runtime, period;
2825 	spinlock_t *group_lock = NULL;
2826 	struct numa_group *ng;
2827 
2828 	/*
2829 	 * The p->mm->numa_scan_seq field gets updated without
2830 	 * exclusive access. Use READ_ONCE() here to ensure
2831 	 * that the field is read in a single access:
2832 	 */
2833 	seq = READ_ONCE(p->mm->numa_scan_seq);
2834 	if (p->numa_scan_seq == seq)
2835 		return;
2836 	p->numa_scan_seq = seq;
2837 	p->numa_scan_period_max = task_scan_max(p);
2838 
2839 	total_faults = p->numa_faults_locality[0] +
2840 		       p->numa_faults_locality[1];
2841 	runtime = numa_get_avg_runtime(p, &period);
2842 
2843 	/* If the task is part of a group prevent parallel updates to group stats */
2844 	ng = deref_curr_numa_group(p);
2845 	if (ng) {
2846 		group_lock = &ng->lock;
2847 		spin_lock_irq(group_lock);
2848 	}
2849 
2850 	/* Find the node with the highest number of faults */
2851 	for_each_online_node(nid) {
2852 		/* Keep track of the offsets in numa_faults array */
2853 		int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
2854 		unsigned long faults = 0, group_faults = 0;
2855 		int priv;
2856 
2857 		for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
2858 			long diff, f_diff, f_weight;
2859 
2860 			mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
2861 			membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
2862 			cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
2863 			cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
2864 
2865 			/* Decay existing window, copy faults since last scan */
2866 			diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
2867 			fault_types[priv] += p->numa_faults[membuf_idx];
2868 			p->numa_faults[membuf_idx] = 0;
2869 
2870 			/*
2871 			 * Normalize the faults_from, so all tasks in a group
2872 			 * count according to CPU use, instead of by the raw
2873 			 * number of faults. Tasks with little runtime have
2874 			 * little over-all impact on throughput, and thus their
2875 			 * faults are less important.
2876 			 */
2877 			f_weight = div64_u64(runtime << 16, period + 1);
2878 			f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
2879 				   (total_faults + 1);
2880 			f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
2881 			p->numa_faults[cpubuf_idx] = 0;
2882 
2883 			p->numa_faults[mem_idx] += diff;
2884 			p->numa_faults[cpu_idx] += f_diff;
2885 			faults += p->numa_faults[mem_idx];
2886 			p->total_numa_faults += diff;
2887 			if (ng) {
2888 				/*
2889 				 * safe because we can only change our own group
2890 				 *
2891 				 * mem_idx represents the offset for a given
2892 				 * nid and priv in a specific region because it
2893 				 * is at the beginning of the numa_faults array.
2894 				 */
2895 				ng->faults[mem_idx] += diff;
2896 				ng->faults[cpu_idx] += f_diff;
2897 				ng->total_faults += diff;
2898 				group_faults += ng->faults[mem_idx];
2899 			}
2900 		}
2901 
2902 		if (!ng) {
2903 			if (faults > max_faults) {
2904 				max_faults = faults;
2905 				max_nid = nid;
2906 			}
2907 		} else if (group_faults > max_faults) {
2908 			max_faults = group_faults;
2909 			max_nid = nid;
2910 		}
2911 	}
2912 
2913 	/* Cannot migrate task to CPU-less node */
2914 	if (max_nid != NUMA_NO_NODE && !node_state(max_nid, N_CPU)) {
2915 		int near_nid = max_nid;
2916 		int distance, near_distance = INT_MAX;
2917 
2918 		for_each_node_state(nid, N_CPU) {
2919 			distance = node_distance(max_nid, nid);
2920 			if (distance < near_distance) {
2921 				near_nid = nid;
2922 				near_distance = distance;
2923 			}
2924 		}
2925 		max_nid = near_nid;
2926 	}
2927 
2928 	if (ng) {
2929 		numa_group_count_active_nodes(ng);
2930 		spin_unlock_irq(group_lock);
2931 		max_nid = preferred_group_nid(p, max_nid);
2932 	}
2933 
2934 	if (max_faults) {
2935 		/* Set the new preferred node */
2936 		if (max_nid != p->numa_preferred_nid)
2937 			sched_setnuma(p, max_nid);
2938 	}
2939 
2940 	update_task_scan_period(p, fault_types[0], fault_types[1]);
2941 }
2942 
2943 static inline int get_numa_group(struct numa_group *grp)
2944 {
2945 	return refcount_inc_not_zero(&grp->refcount);
2946 }
2947 
2948 static inline void put_numa_group(struct numa_group *grp)
2949 {
2950 	if (refcount_dec_and_test(&grp->refcount))
2951 		kfree_rcu(grp, rcu);
2952 }
2953 
2954 static void task_numa_group(struct task_struct *p, int cpupid, int flags,
2955 			int *priv)
2956 {
2957 	struct numa_group *grp, *my_grp;
2958 	struct task_struct *tsk;
2959 	bool join = false;
2960 	int cpu = cpupid_to_cpu(cpupid);
2961 	int i;
2962 
2963 	if (unlikely(!deref_curr_numa_group(p))) {
2964 		unsigned int size = sizeof(struct numa_group) +
2965 				    NR_NUMA_HINT_FAULT_STATS *
2966 				    nr_node_ids * sizeof(unsigned long);
2967 
2968 		grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2969 		if (!grp)
2970 			return;
2971 
2972 		refcount_set(&grp->refcount, 1);
2973 		grp->active_nodes = 1;
2974 		grp->max_faults_cpu = 0;
2975 		spin_lock_init(&grp->lock);
2976 		grp->gid = p->pid;
2977 
2978 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2979 			grp->faults[i] = p->numa_faults[i];
2980 
2981 		grp->total_faults = p->total_numa_faults;
2982 
2983 		grp->nr_tasks++;
2984 		rcu_assign_pointer(p->numa_group, grp);
2985 	}
2986 
2987 	rcu_read_lock();
2988 	tsk = READ_ONCE(cpu_rq(cpu)->curr);
2989 
2990 	if (!cpupid_match_pid(tsk, cpupid))
2991 		goto no_join;
2992 
2993 	grp = rcu_dereference(tsk->numa_group);
2994 	if (!grp)
2995 		goto no_join;
2996 
2997 	my_grp = deref_curr_numa_group(p);
2998 	if (grp == my_grp)
2999 		goto no_join;
3000 
3001 	/*
3002	 * Only join the other group if it's bigger; if we're the bigger group,
3003 	 * the other task will join us.
3004 	 */
3005 	if (my_grp->nr_tasks > grp->nr_tasks)
3006 		goto no_join;
3007 
3008 	/*
3009 	 * Tie-break on the grp address.
3010 	 */
3011 	if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
3012 		goto no_join;
3013 
3014 	/* Always join threads in the same process. */
3015 	if (tsk->mm == current->mm)
3016 		join = true;
3017 
3018 	/* Simple filter to avoid false positives due to PID collisions */
3019 	if (flags & TNF_SHARED)
3020 		join = true;
3021 
3022 	/* Update priv based on whether false sharing was detected */
3023 	*priv = !join;
3024 
3025 	if (join && !get_numa_group(grp))
3026 		goto no_join;
3027 
3028 	rcu_read_unlock();
3029 
3030 	if (!join)
3031 		return;
3032 
3033 	WARN_ON_ONCE(irqs_disabled());
3034 	double_lock_irq(&my_grp->lock, &grp->lock);
3035 
3036 	for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
3037 		my_grp->faults[i] -= p->numa_faults[i];
3038 		grp->faults[i] += p->numa_faults[i];
3039 	}
3040 	my_grp->total_faults -= p->total_numa_faults;
3041 	grp->total_faults += p->total_numa_faults;
3042 
3043 	my_grp->nr_tasks--;
3044 	grp->nr_tasks++;
3045 
3046 	spin_unlock(&my_grp->lock);
3047 	spin_unlock_irq(&grp->lock);
3048 
3049 	rcu_assign_pointer(p->numa_group, grp);
3050 
3051 	put_numa_group(my_grp);
3052 	return;
3053 
3054 no_join:
3055 	rcu_read_unlock();
3056 	return;
3057 }
3058 
3059 /*
3060  * Get rid of NUMA statistics associated with a task (either current or dead).
3061  * If @final is set, the task is dead and has reached refcount zero, so we can
3062  * safely free all relevant data structures. Otherwise, there might be
3063  * concurrent reads from places like load balancing and procfs, and we should
3064  * reset the data back to default state without freeing ->numa_faults.
3065  */
3066 void task_numa_free(struct task_struct *p, bool final)
3067 {
3068 	/* safe: p either is current or is being freed by current */
3069 	struct numa_group *grp = rcu_dereference_raw(p->numa_group);
3070 	unsigned long *numa_faults = p->numa_faults;
3071 	unsigned long flags;
3072 	int i;
3073 
3074 	if (!numa_faults)
3075 		return;
3076 
3077 	if (grp) {
3078 		spin_lock_irqsave(&grp->lock, flags);
3079 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
3080 			grp->faults[i] -= p->numa_faults[i];
3081 		grp->total_faults -= p->total_numa_faults;
3082 
3083 		grp->nr_tasks--;
3084 		spin_unlock_irqrestore(&grp->lock, flags);
3085 		RCU_INIT_POINTER(p->numa_group, NULL);
3086 		put_numa_group(grp);
3087 	}
3088 
3089 	if (final) {
3090 		p->numa_faults = NULL;
3091 		kfree(numa_faults);
3092 	} else {
3093 		p->total_numa_faults = 0;
3094 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
3095 			numa_faults[i] = 0;
3096 	}
3097 }
3098 
3099 /*
3100  * Got a PROT_NONE fault for a page on @node.
3101  */
3102 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
3103 {
3104 	struct task_struct *p = current;
3105 	bool migrated = flags & TNF_MIGRATED;
3106 	int cpu_node = task_node(current);
3107 	int local = !!(flags & TNF_FAULT_LOCAL);
3108 	struct numa_group *ng;
3109 	int priv;
3110 
3111 	if (!static_branch_likely(&sched_numa_balancing))
3112 		return;
3113 
3114 	/* for example, ksmd faulting in a user's mm */
3115 	if (!p->mm)
3116 		return;
3117 
3118 	/*
3119 	 * NUMA faults statistics are unnecessary for the slow memory
3120 	 * node for memory tiering mode.
3121 	 */
3122 	if (!node_is_toptier(mem_node) &&
3123 	    (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING ||
3124 	     !cpupid_valid(last_cpupid)))
3125 		return;
3126 
3127 	/* Allocate buffer to track faults on a per-node basis */
3128 	if (unlikely(!p->numa_faults)) {
3129 		int size = sizeof(*p->numa_faults) *
3130 			   NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
3131 
3132 		p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
3133 		if (!p->numa_faults)
3134 			return;
3135 
3136 		p->total_numa_faults = 0;
3137 		memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
3138 	}
3139 
3140 	/*
3141 	 * First accesses are treated as private, otherwise consider accesses
3142 	 * to be private if the accessing pid has not changed
3143 	 */
3144 	if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
3145 		priv = 1;
3146 	} else {
3147 		priv = cpupid_match_pid(p, last_cpupid);
3148 		if (!priv && !(flags & TNF_NO_GROUP))
3149 			task_numa_group(p, last_cpupid, flags, &priv);
3150 	}
3151 
3152 	/*
3153 	 * If a workload spans multiple NUMA nodes, a shared fault that
3154 	 * occurs wholly within the set of nodes that the workload is
3155 	 * actively using should be counted as local. This allows the
3156 	 * scan rate to slow down when a workload has settled down.
3157 	 */
3158 	ng = deref_curr_numa_group(p);
3159 	if (!priv && !local && ng && ng->active_nodes > 1 &&
3160 				numa_is_active_node(cpu_node, ng) &&
3161 				numa_is_active_node(mem_node, ng))
3162 		local = 1;
3163 
3164 	/*
3165 	 * Retry to migrate task to preferred node periodically, in case it
3166 	 * previously failed, or the scheduler moved us.
3167 	 */
3168 	if (time_after(jiffies, p->numa_migrate_retry)) {
3169 		task_numa_placement(p);
3170 		numa_migrate_preferred(p);
3171 	}
3172 
3173 	if (migrated)
3174 		p->numa_pages_migrated += pages;
3175 	if (flags & TNF_MIGRATE_FAIL)
3176 		p->numa_faults_locality[2] += pages;
3177 
3178 	p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
3179 	p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
3180 	p->numa_faults_locality[local] += pages;
3181 }
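/*
 * Bookkeeping example: a 2MB THP hint fault (pages = 512) on
 * mem_node 1, taken while running on cpu_node 0, adds 512 to the
 * NUMA_MEMBUF slot of node 1 and to the NUMA_CPUBUF slot of node 0;
 * the buffers are later folded into the decaying NUMA_MEM/NUMA_CPU
 * statistics by task_numa_placement().
 */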
3182 
3183 static void reset_ptenuma_scan(struct task_struct *p)
3184 {
3185 	/*
3186 	 * We only did a read acquisition of the mmap sem, so
3187 	 * p->mm->numa_scan_seq is written to without exclusive access
3188 	 * and the update is not guaranteed to be atomic. That's not
3189 	 * much of an issue though, since this is just used for
3190 	 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
3191 	 * expensive, to avoid any form of compiler optimizations:
3192 	 */
3193 	WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
3194 	p->mm->numa_scan_offset = 0;
3195 }
3196 
3197 static bool vma_is_accessed(struct vm_area_struct *vma)
3198 {
3199 	unsigned long pids;
3200 	/*
3201	 * Allow unconditional access for the first two scan passes, so that
3202	 * all pages of all VMAs get prot_none faults irrespective of accesses.
3203	 * This also avoids any side effect of the task scanning amplifying
3204	 * the unfairness of disjoint sets of VMA accesses.
3205 	 */
3206 	if (READ_ONCE(current->mm->numa_scan_seq) < 2)
3207 		return true;
3208 
3209 	pids = vma->numab_state->access_pids[0] | vma->numab_state->access_pids[1];
3210 	return test_bit(hash_32(current->pid, ilog2(BITS_PER_LONG)), &pids);
3211 }
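/*
 * The PID filter above is probabilistic: each accessing task sets bit
 * hash_32(pid, ilog2(BITS_PER_LONG)) in the window's bitmap, one of
 * BITS_PER_LONG bits, so an occasional hash collision may let an
 * unrelated task scan a VMA, which is harmless for a sampling
 * heuristic.
 */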
3212 
3213 #define VMA_PID_RESET_PERIOD (4 * sysctl_numa_balancing_scan_delay)
3214 
3215 /*
3216  * The expensive part of numa migration is done from task_work context.
3217  * Triggered from task_tick_numa().
3218  */
3219 static void task_numa_work(struct callback_head *work)
3220 {
3221 	unsigned long migrate, next_scan, now = jiffies;
3222 	struct task_struct *p = current;
3223 	struct mm_struct *mm = p->mm;
3224 	u64 runtime = p->se.sum_exec_runtime;
3225 	struct vm_area_struct *vma;
3226 	unsigned long start, end;
3227 	unsigned long nr_pte_updates = 0;
3228 	long pages, virtpages;
3229 	struct vma_iterator vmi;
3230 
3231 	SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
3232 
3233 	work->next = work;
3234 	/*
3235 	 * Who cares about NUMA placement when they're dying.
3236 	 *
3237 	 * NOTE: make sure not to dereference p->mm before this check,
3238 	 * exit_task_work() happens _after_ exit_mm() so we could be called
3239 	 * without p->mm even though we still had it when we enqueued this
3240 	 * work.
3241 	 */
3242 	if (p->flags & PF_EXITING)
3243 		return;
3244 
3245 	if (!mm->numa_next_scan) {
3246 		mm->numa_next_scan = now +
3247 			msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
3248 	}
3249 
3250 	/*
3251	 * Enforce maximal scan/migration frequency.
3252 	 */
3253 	migrate = mm->numa_next_scan;
3254 	if (time_before(now, migrate))
3255 		return;
3256 
3257 	if (p->numa_scan_period == 0) {
3258 		p->numa_scan_period_max = task_scan_max(p);
3259 		p->numa_scan_period = task_scan_start(p);
3260 	}
3261 
3262 	next_scan = now + msecs_to_jiffies(p->numa_scan_period);
3263 	if (!try_cmpxchg(&mm->numa_next_scan, &migrate, next_scan))
3264 		return;
3265 
3266 	/*
3267 	 * Delay this task enough that another task of this mm will likely win
3268 	 * the next time around.
3269 	 */
3270 	p->node_stamp += 2 * TICK_NSEC;
3271 
3272 	start = mm->numa_scan_offset;
3273 	pages = sysctl_numa_balancing_scan_size;
3274 	pages <<= 20 - PAGE_SHIFT; /* MB in pages */
3275 	virtpages = pages * 8;	   /* Scan up to this much virtual space */
3276 	if (!pages)
3277 		return;
3278 
3279 
3280 	if (!mmap_read_trylock(mm))
3281 		return;
3282 	vma_iter_init(&vmi, mm, start);
3283 	vma = vma_next(&vmi);
3284 	if (!vma) {
3285 		reset_ptenuma_scan(p);
3286 		start = 0;
3287 		vma_iter_set(&vmi, start);
3288 		vma = vma_next(&vmi);
3289 	}
3290 
3291 	do {
3292 		if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
3293 			is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
3294 			continue;
3295 		}
3296 
3297 		/*
3298 		 * Shared library pages mapped by multiple processes are not
3299 		 * migrated as it is expected they are cache replicated. Avoid
3300 		 * hinting faults in read-only file-backed mappings or the vdso
3301 		 * as migrating the pages will be of marginal benefit.
3302 		 */
3303 		if (!vma->vm_mm ||
3304 		    (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
3305 			continue;
3306 
3307 		/*
3308 		 * Skip inaccessible VMAs to avoid any confusion between
3309 		 * PROT_NONE and NUMA hinting ptes
3310 		 */
3311 		if (!vma_is_accessible(vma))
3312 			continue;
3313 
3314 		/* Initialise new per-VMA NUMAB state. */
3315 		if (!vma->numab_state) {
3316 			vma->numab_state = kzalloc(sizeof(struct vma_numab_state),
3317 				GFP_KERNEL);
3318 			if (!vma->numab_state)
3319 				continue;
3320 
3321 			vma->numab_state->next_scan = now +
3322 				msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
3323 
3324			/* The PID reset happens four scan delays after the first scan */
3325 			vma->numab_state->next_pid_reset =  vma->numab_state->next_scan +
3326 				msecs_to_jiffies(VMA_PID_RESET_PERIOD);
3327 		}
3328 
3329 		/*
3330		 * Scanning the VMAs of short-lived tasks adds more overhead. So
3331		 * delay the scan for new VMAs.
3332 		 */
3333 		if (mm->numa_scan_seq && time_before(jiffies,
3334 						vma->numab_state->next_scan))
3335 			continue;
3336 
3337 		/* Do not scan the VMA if task has not accessed */
3338 		if (!vma_is_accessed(vma))
3339 			continue;
3340 
3341 		/*
3342		 * Reset access PIDs regularly for old VMAs. Reset after checking the
3343		 * VMA for recent access to avoid clearing PID info before the access.
3344 		 */
3345 		if (mm->numa_scan_seq &&
3346 				time_after(jiffies, vma->numab_state->next_pid_reset)) {
3347 			vma->numab_state->next_pid_reset = vma->numab_state->next_pid_reset +
3348 				msecs_to_jiffies(VMA_PID_RESET_PERIOD);
3349 			vma->numab_state->access_pids[0] = READ_ONCE(vma->numab_state->access_pids[1]);
3350 			vma->numab_state->access_pids[1] = 0;
3351 		}
3352 
3353 		do {
3354 			start = max(start, vma->vm_start);
3355 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
3356 			end = min(end, vma->vm_end);
3357 			nr_pte_updates = change_prot_numa(vma, start, end);
3358 
3359 			/*
3360			 * Try to scan sysctl_numa_balancing_scan_size worth of
3361 			 * hpages that have at least one present PTE that
3362 			 * is not already pte-numa. If the VMA contains
3363 			 * areas that are unused or already full of prot_numa
3364 			 * PTEs, scan up to virtpages, to skip through those
3365 			 * areas faster.
3366 			 */
3367 			if (nr_pte_updates)
3368 				pages -= (end - start) >> PAGE_SHIFT;
3369 			virtpages -= (end - start) >> PAGE_SHIFT;
3370 
3371 			start = end;
3372 			if (pages <= 0 || virtpages <= 0)
3373 				goto out;
3374 
3375 			cond_resched();
3376 		} while (end != vma->vm_end);
3377 	} for_each_vma(vmi, vma);
3378 
3379 out:
3380 	/*
3381 	 * It is possible to reach the end of the VMA list but the last few
3382 	 * VMAs are not guaranteed to be vma_migratable. If they are not, we
3383 	 * would find the !migratable VMA on the next scan but not reset the
3384 	 * scanner to the start so check it now.
3385 	 */
3386 	if (vma)
3387 		mm->numa_scan_offset = start;
3388 	else
3389 		reset_ptenuma_scan(p);
3390 	mmap_read_unlock(mm);
3391 
3392 	/*
3393 	 * Make sure tasks use at least 32x as much time to run other code
3394 	 * than they used here, to limit NUMA PTE scanning overhead to 3% max.
3395 	 * Usually update_task_scan_period slows down scanning enough; on an
3396 	 * overloaded system we need to limit overhead on a per task basis.
3397 	 */
3398 	if (unlikely(p->se.sum_exec_runtime != runtime)) {
3399 		u64 diff = p->se.sum_exec_runtime - runtime;
3400 		p->node_stamp += 32 * diff;
3401 	}
3402 }
3403 
3404 void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
3405 {
3406 	int mm_users = 0;
3407 	struct mm_struct *mm = p->mm;
3408 
3409 	if (mm) {
3410 		mm_users = atomic_read(&mm->mm_users);
3411 		if (mm_users == 1) {
3412 			mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
3413 			mm->numa_scan_seq = 0;
3414 		}
3415 	}
3416 	p->node_stamp			= 0;
3417 	p->numa_scan_seq		= mm ? mm->numa_scan_seq : 0;
3418 	p->numa_scan_period		= sysctl_numa_balancing_scan_delay;
3419 	p->numa_migrate_retry		= 0;
3420 	/* Protect against double add, see task_tick_numa and task_numa_work */
3421 	p->numa_work.next		= &p->numa_work;
3422 	p->numa_faults			= NULL;
3423 	p->numa_pages_migrated		= 0;
3424 	p->total_numa_faults		= 0;
3425 	RCU_INIT_POINTER(p->numa_group, NULL);
3426 	p->last_task_numa_placement	= 0;
3427 	p->last_sum_exec_runtime	= 0;
3428 
3429 	init_task_work(&p->numa_work, task_numa_work);
3430 
3431 	/* New address space, reset the preferred nid */
3432 	if (!(clone_flags & CLONE_VM)) {
3433 		p->numa_preferred_nid = NUMA_NO_NODE;
3434 		return;
3435 	}
3436 
3437 	/*
3438 	 * New thread, keep existing numa_preferred_nid which should be copied
3439 	 * already by arch_dup_task_struct but stagger when scans start.
3440 	 */
3441 	if (mm) {
3442 		unsigned int delay;
3443 
3444 		delay = min_t(unsigned int, task_scan_max(current),
3445 			current->numa_scan_period * mm_users * NSEC_PER_MSEC);
3446 		delay += 2 * TICK_NSEC;
3447 		p->node_stamp = delay;
3448 	}
3449 }
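/*
 * A sketch of the stagger above, with made-up numbers: if
 * current->numa_scan_period is 1000ms and mm_users is 4, the candidate
 * delay is 1000 * 4 * NSEC_PER_MSEC = 4e9, clamped by task_scan_max(),
 * plus two ticks. Seeding p->node_stamp this way spreads out when the
 * sibling threads of one mm first enter task_tick_numa()'s scan path.
 */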
3450 
3451 /*
3452  * Drive the periodic memory faults..
3453  */
3454 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
3455 {
3456 	struct callback_head *work = &curr->numa_work;
3457 	u64 period, now;
3458 
3459 	/*
3460 	 * We don't care about NUMA placement if we don't have memory.
3461 	 */
3462 	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
3463 		return;
3464 
3465 	/*
3466 	 * Using runtime rather than walltime has the dual advantage that
3467 	 * we (mostly) drive the selection from busy threads and that the
3468 	 * task needs to have done some actual work before we bother with
3469 	 * NUMA placement.
3470 	 */
3471 	now = curr->se.sum_exec_runtime;
3472 	period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
3473 
3474 	if (now > curr->node_stamp + period) {
3475 		if (!curr->node_stamp)
3476 			curr->numa_scan_period = task_scan_start(curr);
3477 		curr->node_stamp += period;
3478 
3479 		if (!time_before(jiffies, curr->mm->numa_next_scan))
3480 			task_work_add(curr, work, TWA_RESUME);
3481 	}
3482 }
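/*
 * E.g. with a (hypothetical) numa_scan_period of 1000ms, the period above
 * is 10^9ns: task_numa_work() is queued once the task has accumulated
 * another second of sum_exec_runtime past node_stamp and
 * mm->numa_next_scan has passed, so mostly-idle tasks never pay for NUMA
 * scanning.
 */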
3483 
3484 static void update_scan_period(struct task_struct *p, int new_cpu)
3485 {
3486 	int src_nid = cpu_to_node(task_cpu(p));
3487 	int dst_nid = cpu_to_node(new_cpu);
3488 
3489 	if (!static_branch_likely(&sched_numa_balancing))
3490 		return;
3491 
3492 	if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
3493 		return;
3494 
3495 	if (src_nid == dst_nid)
3496 		return;
3497 
3498 	/*
3499 	 * Allow resets if faults have been trapped before one scan
3500 	 * has completed. This is most likely due to a new task that
3501 	 * is pulled cross-node due to wakeups or load balancing.
3502 	 */
3503 	if (p->numa_scan_seq) {
3504 		/*
3505 		 * Avoid scan adjustments if moving to the preferred
3506 		 * node or if the task was not previously running on
3507 		 * the preferred node.
3508 		 */
3509 		if (dst_nid == p->numa_preferred_nid ||
3510 		    (p->numa_preferred_nid != NUMA_NO_NODE &&
3511 			src_nid != p->numa_preferred_nid))
3512 			return;
3513 	}
3514 
3515 	p->numa_scan_period = task_scan_start(p);
3516 }
3517 
3518 #else
3519 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
3520 {
3521 }
3522 
3523 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
3524 {
3525 }
3526 
3527 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
3528 {
3529 }
3530 
3531 static inline void update_scan_period(struct task_struct *p, int new_cpu)
3532 {
3533 }
3534 
3535 #endif /* CONFIG_NUMA_BALANCING */
3536 
3537 static void
3538 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3539 {
3540 	update_load_add(&cfs_rq->load, se->load.weight);
3541 #ifdef CONFIG_SMP
3542 	if (entity_is_task(se)) {
3543 		struct rq *rq = rq_of(cfs_rq);
3544 
3545 		account_numa_enqueue(rq, task_of(se));
3546 		list_add(&se->group_node, &rq->cfs_tasks);
3547 	}
3548 #endif
3549 	cfs_rq->nr_running++;
3550 	if (se_is_idle(se))
3551 		cfs_rq->idle_nr_running++;
3552 }
3553 
3554 static void
3555 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3556 {
3557 	update_load_sub(&cfs_rq->load, se->load.weight);
3558 #ifdef CONFIG_SMP
3559 	if (entity_is_task(se)) {
3560 		account_numa_dequeue(rq_of(cfs_rq), task_of(se));
3561 		list_del_init(&se->group_node);
3562 	}
3563 #endif
3564 	cfs_rq->nr_running--;
3565 	if (se_is_idle(se))
3566 		cfs_rq->idle_nr_running--;
3567 }
3568 
3569 /*
3570  * Signed add and clamp on underflow.
3571  *
3572  * Explicitly do a load-store to ensure the intermediate value never hits
3573  * memory. This allows lockless observations without ever seeing the negative
3574  * values.
3575  */
3576 #define add_positive(_ptr, _val) do {                           \
3577 	typeof(_ptr) ptr = (_ptr);                              \
3578 	typeof(_val) val = (_val);                              \
3579 	typeof(*ptr) res, var = READ_ONCE(*ptr);                \
3580 								\
3581 	res = var + val;                                        \
3582 								\
3583 	if (val < 0 && res > var)                               \
3584 		res = 0;                                        \
3585 								\
3586 	WRITE_ONCE(*ptr, res);                                  \
3587 } while (0)
3588 
3589 /*
3590  * Unsigned subtract and clamp on underflow.
3591  *
3592  * Explicitly do a load-store to ensure the intermediate value never hits
3593  * memory. This allows lockless observations without ever seeing the negative
3594  * values.
3595  */
3596 #define sub_positive(_ptr, _val) do {				\
3597 	typeof(_ptr) ptr = (_ptr);				\
3598 	typeof(*ptr) val = (_val);				\
3599 	typeof(*ptr) res, var = READ_ONCE(*ptr);		\
3600 	res = var - val;					\
3601 	if (res > var)						\
3602 		res = 0;					\
3603 	WRITE_ONCE(*ptr, res);					\
3604 } while (0)
3605 
3606 /*
3607  * Remove and clamp on negative, from a local variable.
3608  *
3609  * A variant of sub_positive(), which does not use explicit load-store
3610  * and is thus optimized for local variable updates.
3611  */
3612 #define lsub_positive(_ptr, _val) do {				\
3613 	typeof(_ptr) ptr = (_ptr);				\
3614 	*ptr -= min_t(typeof(*ptr), *ptr, _val);		\
3615 } while (0)
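/*
 * Usage sketch for the helpers above, with made-up values:
 *
 *	unsigned long avg = 5;
 *
 *	sub_positive(&avg, 3);		// avg == 2
 *	sub_positive(&avg, 7);		// would underflow: clamped, avg == 0
 *
 * The READ_ONCE()/WRITE_ONCE() pair ensures a lockless reader observes
 * either the old or the clamped new value, never a wrapped-around
 * intermediate.
 */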
3616 
3617 #ifdef CONFIG_SMP
3618 static inline void
3619 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3620 {
3621 	cfs_rq->avg.load_avg += se->avg.load_avg;
3622 	cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
3623 }
3624 
3625 static inline void
3626 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3627 {
3628 	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3629 	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
3630 	/* See update_cfs_rq_load_avg() */
3631 	cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
3632 					  cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
3633 }
3634 #else
3635 static inline void
3636 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
3637 static inline void
3638 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
3639 #endif
3640 
3641 static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
3642 			   unsigned long weight)
3643 {
3644 	unsigned long old_weight = se->load.weight;
3645 	s64 vlag, vslice;
3646 
3647 	/*
3648 	 * VRUNTIME
3649 	 * ========
3650 	 *
3651 	 * COROLLARY #1: The virtual runtime of the entity needs to be
3652 	 * adjusted if re-weight at !0-lag point.
3653 	 *
3654 	 * Proof: For contradiction assume this is not true, so we can
3655 	 * re-weight without changing vruntime at !0-lag point.
3656 	 *
3657 	 *             Weight	VRuntime   Avg-VRuntime
3658 	 *     before    w          v            V
3659 	 *      after    w'         v'           V'
3660 	 *
3661 	 * Since lag needs to be preserved through re-weight:
3662 	 *
3663 	 *	lag = (V - v)*w = (V'- v')*w', where v = v'
3664 	 *	==>	V' = (V - v)*w/w' + v		(1)
3665 	 *
3666 	 * Let W be the total weight of the entities before reweight,
3667 	 * since V' is the new weighted average of entities:
3668 	 *
3669 	 *	V' = (WV + w'v - wv) / (W + w' - w)	(2)
3670 	 *
3671 	 * by using (1) & (2) we obtain:
3672 	 *
3673 	 *	(WV + w'v - wv) / (W + w' - w) = (V - v)*w/w' + v
3674 	 *	==> (WV-Wv+Wv+w'v-wv)/(W+w'-w) = (V - v)*w/w' + v
3675 	 *	==> (WV - Wv)/(W + w' - w) + v = (V - v)*w/w' + v
3676 	 *	==>	(V - v)*W/(W + w' - w) = (V - v)*w/w' (3)
3677 	 *
3678 	 * Since we are doing at !0-lag point which means V != v, we
3679 	 * can simplify (3):
3680 	 *
3681 	 *	==>	W / (W + w' - w) = w / w'
3682 	 *	==>	Ww' = Ww + ww' - ww
3683 	 *	==>	W * (w' - w) = w * (w' - w)
3684 	 *	==>	W = w	(re-weight indicates w' != w)
3685 	 *
3686 	 * So the cfs_rq contains only one entity, hence vruntime of
3687 	 * the entity @v should always equal to the cfs_rq's weighted
3688 	 * average vruntime @V, which means we will always re-weight
3689 	 * at 0-lag point, thus breach assumption. Proof completed.
3690 	 *
3691 	 *
3692 	 * COROLLARY #2: Re-weight does NOT affect weighted average
3693 	 * vruntime of all the entities.
3694 	 *
3695 	 * Proof: According to corollary #1, Eq. (1) should be:
3696 	 *
3697 	 *	(V - v)*w = (V' - v')*w'
3698 	 *	==>    v' = V' - (V - v)*w/w'		(4)
3699 	 *
3700 	 * According to the weighted average formula, we have:
3701 	 *
3702 	 *	V' = (WV - wv + w'v') / (W - w + w')
3703 	 *	   = (WV - wv + w'(V' - (V - v)w/w')) / (W - w + w')
3704 	 *	   = (WV - wv + w'V' - Vw + wv) / (W - w + w')
3705 	 *	   = (WV + w'V' - Vw) / (W - w + w')
3706 	 *
3707 	 *	==>  V'*(W - w + w') = WV + w'V' - Vw
3708 	 *	==>	V' * (W - w) = (W - w) * V	(5)
3709 	 *
3710 	 * If the entity is the only one in the cfs_rq, then reweight
3711 	 * always occurs at 0-lag point, so V won't change. Or else
3712 	 * there are other entities, hence W != w, then Eq. (5) turns
3713 	 * into V' = V. So V won't change in either case, proof done.
3714 	 *
3715 	 *
3716 	 * So according to corollary #1 & #2, the effect of re-weight
3717 	 * on vruntime should be:
3718 	 *
3719 	 *	v' = V' - (V - v) * w / w'		(4)
3720 	 *	   = V  - (V - v) * w / w'
3721 	 *	   = V  - vl * w / w'
3722 	 *	   = V  - vl'
3723 	 */
3724 	if (avruntime != se->vruntime) {
3725 		vlag = entity_lag(avruntime, se);
3726 		vlag = div_s64(vlag * old_weight, weight);
3727 		se->vruntime = avruntime - vlag;
3728 	}
3729 
3730 	/*
3731 	 * DEADLINE
3732 	 * ========
3733 	 *
3734 	 * When the weight changes, the virtual time slope changes and
3735 	 * we should adjust the relative virtual deadline accordingly.
3736 	 *
3737 	 *	d' = v' + (d - v)*w/w'
3738 	 *	   = V' - (V - v)*w/w' + (d - v)*w/w'
3739 	 *	   = V  - (V - v)*w/w' + (d - v)*w/w'
3740 	 *	   = V  + (d - V)*w/w'
3741 	 */
3742 	vslice = (s64)(se->deadline - avruntime);
3743 	vslice = div_s64(vslice * old_weight, weight);
3744 	se->deadline = avruntime + vslice;
3745 }
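/*
 * Worked example with made-up numbers: let V (avruntime) = 100, v = 80,
 * d = 130, and let the weight double from w = 1024 to w' = 2048:
 *
 *	vlag   = (100 - 80)  * 1024 / 2048 = 10  ==>  v' = 100 - 10 = 90
 *	vslice = (130 - 100) * 1024 / 2048 = 15  ==>  d' = 100 + 15 = 115
 *
 * i.e. both the virtual lag and the relative virtual deadline halve when
 * the weight doubles, preserving the weighted lag.
 */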
3746 
3747 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
3748 			    unsigned long weight)
3749 {
3750 	bool curr = cfs_rq->curr == se;
3751 	u64 avruntime;
3752 
3753 	if (se->on_rq) {
3754 		/* commit outstanding execution time */
3755 		update_curr(cfs_rq);
3756 		avruntime = avg_vruntime(cfs_rq);
3757 		if (!curr)
3758 			__dequeue_entity(cfs_rq, se);
3759 		update_load_sub(&cfs_rq->load, se->load.weight);
3760 	}
3761 	dequeue_load_avg(cfs_rq, se);
3762 
3763 	if (se->on_rq) {
3764 		reweight_eevdf(se, avruntime, weight);
3765 	} else {
3766 		/*
3767 		 * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
3768 		 * we need to scale se->vlag when w_i changes.
3769 		 */
3770 		se->vlag = div_s64(se->vlag * se->load.weight, weight);
3771 	}
3772 
3773 	update_load_set(&se->load, weight);
3774 
3775 	trace_android_vh_reweight_entity(se);
3776 #ifdef CONFIG_SMP
3777 	do {
3778 		u32 divider = get_pelt_divider(&se->avg);
3779 
3780 		se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
3781 	} while (0);
3782 #endif
3783 
3784 	enqueue_load_avg(cfs_rq, se);
3785 	if (se->on_rq) {
3786 		update_load_add(&cfs_rq->load, se->load.weight);
3787 		if (!curr)
3788 			__enqueue_entity(cfs_rq, se);
3789 
3790 		/*
3791 		 * The entity's vruntime has been adjusted, so let's check
3792 		 * whether the rq-wide min_vruntime needs updated too. Since
3793 		 * the calculations above require stable min_vruntime rather
3794 		 * than up-to-date one, we do the update at the end of the
3795 		 * reweight process.
3796 		 */
3797 		update_min_vruntime(cfs_rq);
3798 	}
3799 }
3800 
3801 void reweight_task(struct task_struct *p, int prio)
3802 {
3803 	struct sched_entity *se = &p->se;
3804 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
3805 	struct load_weight *load = &se->load;
3806 	unsigned long weight = scale_load(sched_prio_to_weight[prio]);
3807 
3808 	reweight_entity(cfs_rq, se, weight);
3809 	load->inv_weight = sched_prio_to_wmult[prio];
3810 }
3811 EXPORT_SYMBOL_GPL(reweight_task);
3812 
3813 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
3814 
3815 #ifdef CONFIG_FAIR_GROUP_SCHED
3816 #ifdef CONFIG_SMP
3817 /*
3818  * All this does is approximate the hierarchical proportion which includes that
3819  * global sum we all love to hate.
3820  *
3821  * That is, the weight of a group entity, is the proportional share of the
3822  * group weight based on the group runqueue weights. That is:
3823  *
3824  *                     tg->weight * grq->load.weight
3825  *   ge->load.weight = -----------------------------               (1)
3826  *                       \Sum grq->load.weight
3827  *
3828  * Now, because that sum is prohibitively expensive to compute (been
3829  * there, done that) we approximate it with this average stuff. The average
3830  * moves slower and therefore the approximation is cheaper and more stable.
3831  *
3832  * So instead of the above, we substitute:
3833  *
3834  *   grq->load.weight -> grq->avg.load_avg                         (2)
3835  *
3836  * which yields the following:
3837  *
3838  *                     tg->weight * grq->avg.load_avg
3839  *   ge->load.weight = ------------------------------              (3)
3840  *                             tg->load_avg
3841  *
3842  * Where: tg->load_avg ~= \Sum grq->avg.load_avg
3843  *
3844  * That is shares_avg, and it is right (given the approximation (2)).
3845  *
3846  * The problem with it is that because the average is slow -- it was designed
3847  * to be exactly that of course -- this leads to transients in boundary
3848  * conditions. Specifically, the case where the group was idle and we start the
3849  * one task. It takes time for our CPU's grq->avg.load_avg to build up,
3850  * yielding bad latency etc..
3851  *
3852  * Now, in that special case (1) reduces to:
3853  *
3854  *                     tg->weight * grq->load.weight
3855  *   ge->load.weight = ----------------------------- = tg->weight   (4)
3856  *                         grq->load.weight
3857  *
3858  * That is, the sum collapses because all other CPUs are idle; the UP scenario.
3859  *
3860  * So what we do is modify our approximation (3) to approach (4) in the (near)
3861  * UP case, like:
3862  *
3863  *   ge->load.weight =
3864  *
3865  *              tg->weight * grq->load.weight
3866  *     ---------------------------------------------------         (5)
3867  *     tg->load_avg - grq->avg.load_avg + grq->load.weight
3868  *
3869  * But because grq->load.weight can drop to 0, resulting in a divide by zero,
3870  * we need to use grq->avg.load_avg as its lower bound, which then gives:
3871  *
3872  *
3873  *                     tg->weight * grq->load.weight
3874  *   ge->load.weight = -----------------------------		   (6)
3875  *                             tg_load_avg'
3876  *
3877  * Where:
3878  *
3879  *   tg_load_avg' = tg->load_avg - grq->avg.load_avg +
3880  *                  max(grq->load.weight, grq->avg.load_avg)
3881  *
3882  * And that is shares_weight and is icky. In the (near) UP case it approaches
3883  * (4) while in the normal case it approaches (3). It consistently
3884  * overestimates the ge->load.weight and therefore:
3885  *
3886  *   \Sum ge->load.weight >= tg->weight
3887  *
3888  * hence icky!
3889  */
3890 static long calc_group_shares(struct cfs_rq *cfs_rq)
3891 {
3892 	long tg_weight, tg_shares, load, shares;
3893 	struct task_group *tg = cfs_rq->tg;
3894 
3895 	tg_shares = READ_ONCE(tg->shares);
3896 
3897 	load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
3898 
3899 	tg_weight = atomic_long_read(&tg->load_avg);
3900 
3901 	/* Ensure tg_weight >= load */
3902 	tg_weight -= cfs_rq->tg_load_avg_contrib;
3903 	tg_weight += load;
3904 
3905 	shares = (tg_shares * load);
3906 	if (tg_weight)
3907 		shares /= tg_weight;
3908 
3909 	/*
3910 	 * MIN_SHARES has to be unscaled here to support per-CPU partitioning
3911 	 * of a group with small tg->shares value. It is a floor value which is
3912 	 * assigned as a minimum load.weight to the sched_entity representing
3913 	 * the group on a CPU.
3914 	 *
3915 	 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
3916 	 * on an 8-core system with 8 tasks each runnable on one CPU shares has
3917 	 * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In
3918 	 * case no task is runnable on a CPU MIN_SHARES=2 should be returned
3919 	 * instead of 0.
3920 	 */
3921 	return clamp_t(long, shares, MIN_SHARES, tg_shares);
3922 }
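/*
 * Worked example for (6), with hypothetical numbers: tg->shares = 2048,
 * this CPU's load contribution is 512 and the corrected tg_weight is 2048:
 *
 *	shares = 2048 * 512 / 2048 = 512
 *
 * i.e. this CPU's group entity carries a quarter of the group weight,
 * matching its quarter share of the group load.
 */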
3923 #endif /* CONFIG_SMP */
3924 
3925 /*
3926  * Recomputes the group entity based on the current state of its group
3927  * runqueue.
3928  */
3929 static void update_cfs_group(struct sched_entity *se)
3930 {
3931 	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3932 	long shares;
3933 
3934 	if (!gcfs_rq)
3935 		return;
3936 
3937 	if (throttled_hierarchy(gcfs_rq))
3938 		return;
3939 
3940 #ifndef CONFIG_SMP
3941 	shares = READ_ONCE(gcfs_rq->tg->shares);
3942 #else
3943 	shares = calc_group_shares(gcfs_rq);
3944 #endif
3945 	if (unlikely(se->load.weight != shares))
3946 		reweight_entity(cfs_rq_of(se), se, shares);
3947 }
3948 
3949 #else /* CONFIG_FAIR_GROUP_SCHED */
3950 static inline void update_cfs_group(struct sched_entity *se)
3951 {
3952 }
3953 #endif /* CONFIG_FAIR_GROUP_SCHED */
3954 
3955 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
3956 {
3957 	struct rq *rq = rq_of(cfs_rq);
3958 
3959 	if (&rq->cfs == cfs_rq) {
3960 		/*
3961 		 * There are a few boundary cases this might miss but it should
3962 		 * get called often enough that this should (hopefully) not be
3963 		 * a real problem.
3964 		 *
3965 		 * It will not get called when we go idle, because the idle
3966 		 * thread is a different class (!fair), nor will the utilization
3967 		 * number include things like RT tasks.
3968 		 *
3969 		 * As is, the util number is not freq-invariant (we'd have to
3970 		 * implement arch_scale_freq_capacity() for that).
3971 		 *
3972 		 * See cpu_util_cfs().
3973 		 */
3974 		cpufreq_update_util(rq, flags);
3975 	}
3976 }
3977 
3978 #ifdef CONFIG_SMP
3979 static inline bool load_avg_is_decayed(struct sched_avg *sa)
3980 {
3981 	if (sa->load_sum)
3982 		return false;
3983 
3984 	if (sa->util_sum)
3985 		return false;
3986 
3987 	if (sa->runnable_sum)
3988 		return false;
3989 
3990 	/*
3991 	 * _avg must be null when _sum are null because _avg = _sum / divider
3992 	 * Make sure that rounding and/or propagation of PELT values never
3993 	 * break this.
3994 	 */
3995 	SCHED_WARN_ON(sa->load_avg ||
3996 		      sa->util_avg ||
3997 		      sa->runnable_avg);
3998 
3999 	return true;
4000 }
4001 
4002 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
4003 {
4004 	return u64_u32_load_copy(cfs_rq->avg.last_update_time,
4005 				 cfs_rq->last_update_time_copy);
4006 }
4007 #ifdef CONFIG_FAIR_GROUP_SCHED
4008 /*
4009  * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
4010  * immediately before a parent cfs_rq, and cfs_rqs are removed from the list
4011  * bottom-up, we only have to test whether the cfs_rq before us on the list
4012  * is our child.
4013  * If cfs_rq is not on the list, test whether a child needs to be added to
4014  * connect a branch to the tree (see list_add_leaf_cfs_rq() for details).
4015  */
4016 static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
4017 {
4018 	struct cfs_rq *prev_cfs_rq;
4019 	struct list_head *prev;
4020 
4021 	if (cfs_rq->on_list) {
4022 		prev = cfs_rq->leaf_cfs_rq_list.prev;
4023 	} else {
4024 		struct rq *rq = rq_of(cfs_rq);
4025 
4026 		prev = rq->tmp_alone_branch;
4027 	}
4028 
4029 	prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
4030 
4031 	return (prev_cfs_rq->tg->parent == cfs_rq->tg);
4032 }
4033 
4034 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
4035 {
4036 	if (cfs_rq->load.weight)
4037 		return false;
4038 
4039 	if (!load_avg_is_decayed(&cfs_rq->avg))
4040 		return false;
4041 
4042 	if (child_cfs_rq_on_list(cfs_rq))
4043 		return false;
4044 
4045 	return true;
4046 }
4047 
4048 /**
4049  * update_tg_load_avg - update the tg's load avg
4050  * @cfs_rq: the cfs_rq whose avg changed
4051  *
4052  * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
4053  * However, because tg->load_avg is a global value there are performance
4054  * considerations.
4055  *
4056  * In order to avoid having to look at the other cfs_rq's, we use a
4057  * differential update where we store the last value we propagated. This in
4058  * turn allows skipping updates if the differential is 'small'.
4059  *
4060  * Updating tg's load_avg is necessary before update_cfs_group().
4061  */
4062 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
4063 {
4064 	long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
4065 
4066 	/*
4067 	 * No need to update load_avg for root_task_group as it is not used.
4068 	 */
4069 	if (cfs_rq->tg == &root_task_group)
4070 		return;
4071 
4072 	if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
4073 		atomic_long_add(delta, &cfs_rq->tg->load_avg);
4074 		cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
4075 	}
4076 }
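/*
 * E.g. if tg_load_avg_contrib is 640, only a delta with absolute value
 * above 640 / 64 = 10 (~1.5%) is propagated to the global tg->load_avg;
 * smaller fluctuations are absorbed locally to limit traffic on the
 * shared cacheline.
 */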
4077 
4078 /*
4079  * Called within set_task_rq() right before setting a task's CPU. The
4080  * caller only guarantees p->pi_lock is held; no other assumptions,
4081  * including the state of rq->lock, should be made.
4082  */
4083 void set_task_rq_fair(struct sched_entity *se,
4084 		      struct cfs_rq *prev, struct cfs_rq *next)
4085 {
4086 	u64 p_last_update_time;
4087 	u64 n_last_update_time;
4088 
4089 	if (!sched_feat(ATTACH_AGE_LOAD))
4090 		return;
4091 
4092 	/*
4093 	 * We are supposed to update the task to "current" time, so that it is
4094 	 * up to date and ready to go to the new CPU/cfs_rq. But we have
4095 	 * difficulty in getting what the current time is, so simply throw away
4096 	 * the out-of-date time. This will result in the wakee task being less
4097 	 * decayed, but giving the wakee more load does not sound bad.
4098 	 */
4099 	if (!(se->avg.last_update_time && prev))
4100 		return;
4101 
4102 	p_last_update_time = cfs_rq_last_update_time(prev);
4103 	n_last_update_time = cfs_rq_last_update_time(next);
4104 
4105 	__update_load_avg_blocked_se(p_last_update_time, se);
4106 	se->avg.last_update_time = n_last_update_time;
4107 }
4108 
4109 /*
4110  * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
4111  * propagate its contribution. The key to this propagation is the invariant
4112  * that for each group:
4113  *
4114  *   ge->avg == grq->avg						(1)
4115  *
4116  * _IFF_ we look at the pure running and runnable sums. Because they
4117  * represent the very same entity, just at different points in the hierarchy.
4118  *
4119  * Per the above update_tg_cfs_util() and update_tg_cfs_runnable() are trivial
4120  * and simply copy the running/runnable sum over (but still wrong, because
4121  * the group entity and group rq do not have their PELT windows aligned).
4122  *
4123  * However, update_tg_cfs_load() is more complex. So we have:
4124  *
4125  *   ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg		(2)
4126  *
4127  * And since, like util, the runnable part should be directly transferable,
4128  * the following would _appear_ to be the straightforward approach:
4129  *
4130  *   grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg	(3)
4131  *
4132  * And per (1) we have:
4133  *
4134  *   ge->avg.runnable_avg == grq->avg.runnable_avg
4135  *
4136  * Which gives:
4137  *
4138  *                      ge->load.weight * grq->avg.load_avg
4139  *   ge->avg.load_avg = -----------------------------------		(4)
4140  *                               grq->load.weight
4141  *
4142  * Except that is wrong!
4143  *
4144  * Because while for entities historical weight is not important and we
4145  * really only care about our future and therefore can consider a pure
4146  * runnable sum, runqueues can NOT do this.
4147  *
4148  * We specifically want runqueues to have a load_avg that includes
4149  * historical weights. Those represent the blocked load, the load we expect
4150  * to (shortly) return to us. This only works by keeping the weights as
4151  * integral part of the sum. We therefore cannot decompose as per (3).
4152  *
4153  * Another reason this doesn't work is that runnable isn't a 0-sum entity.
4154  * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
4155  * rq itself is runnable anywhere between 2/3 and 1 depending on how the
4156  * runnable section of these tasks overlap (or not). If they were to perfectly
4157  * align the rq as a whole would be runnable 2/3 of the time. If however we
4158  * always have at least 1 runnable task, the rq as a whole is always runnable.
4159  *
4160  * So we'll have to approximate.. :/
4161  *
4162  * Given the constraint:
4163  *
4164  *   ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
4165  *
4166  * We can construct a rule that adds runnable to a rq by assuming minimal
4167  * overlap.
4168  *
4169  * On removal, we'll assume each task is equally runnable; which yields:
4170  *
4171  *   grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
4172  *
4173  * XXX: only do this for the part of runnable > running ?
4174  *
4175  */
4176 static inline void
4177 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
4178 {
4179 	long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg;
4180 	u32 new_sum, divider;
4181 
4182 	/* Nothing to update */
4183 	if (!delta_avg)
4184 		return;
4185 
4186 	/*
4187 	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4188 	 * See ___update_load_avg() for details.
4189 	 */
4190 	divider = get_pelt_divider(&cfs_rq->avg);
4191 
4192 
4193 	/* Set new sched_entity's utilization */
4194 	se->avg.util_avg = gcfs_rq->avg.util_avg;
4195 	new_sum = se->avg.util_avg * divider;
4196 	delta_sum = (long)new_sum - (long)se->avg.util_sum;
4197 	se->avg.util_sum = new_sum;
4198 
4199 	/* Update parent cfs_rq utilization */
4200 	add_positive(&cfs_rq->avg.util_avg, delta_avg);
4201 	add_positive(&cfs_rq->avg.util_sum, delta_sum);
4202 
4203 	/* See update_cfs_rq_load_avg() */
4204 	cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
4205 					  cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
4206 }
4207 
4208 static inline void
4209 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
4210 {
4211 	long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
4212 	u32 new_sum, divider;
4213 
4214 	/* Nothing to update */
4215 	if (!delta_avg)
4216 		return;
4217 
4218 	/*
4219 	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4220 	 * See ___update_load_avg() for details.
4221 	 */
4222 	divider = get_pelt_divider(&cfs_rq->avg);
4223 
4224 	/* Set new sched_entity's runnable */
4225 	se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
4226 	new_sum = se->avg.runnable_avg * divider;
4227 	delta_sum = (long)new_sum - (long)se->avg.runnable_sum;
4228 	se->avg.runnable_sum = new_sum;
4229 
4230 	/* Update parent cfs_rq runnable */
4231 	add_positive(&cfs_rq->avg.runnable_avg, delta_avg);
4232 	add_positive(&cfs_rq->avg.runnable_sum, delta_sum);
4233 	/* See update_cfs_rq_load_avg() */
4234 	cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
4235 					      cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
4236 }
4237 
4238 static inline void
4239 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
4240 {
4241 	long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
4242 	unsigned long load_avg;
4243 	u64 load_sum = 0;
4244 	s64 delta_sum;
4245 	u32 divider;
4246 
4247 	if (!runnable_sum)
4248 		return;
4249 
4250 	gcfs_rq->prop_runnable_sum = 0;
4251 
4252 	/*
4253 	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4254 	 * See ___update_load_avg() for details.
4255 	 */
4256 	divider = get_pelt_divider(&cfs_rq->avg);
4257 
4258 	if (runnable_sum >= 0) {
4259 		/*
4260 		 * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
4261 		 * the CPU is saturated running == runnable.
4262 		 */
4263 		runnable_sum += se->avg.load_sum;
4264 		runnable_sum = min_t(long, runnable_sum, divider);
4265 	} else {
4266 		/*
4267 		 * Estimate the new unweighted runnable_sum of the gcfs_rq by
4268 		 * assuming all tasks are equally runnable.
4269 		 */
4270 		if (scale_load_down(gcfs_rq->load.weight)) {
4271 			load_sum = div_u64(gcfs_rq->avg.load_sum,
4272 				scale_load_down(gcfs_rq->load.weight));
4273 		}
4274 
4275 		/* But make sure to not inflate se's runnable */
4276 		runnable_sum = min(se->avg.load_sum, load_sum);
4277 	}
4278 
4279 	/*
4280 	 * runnable_sum can't be lower than running_sum
4281 	 * Rescale running sum to be in the same range as runnable sum
4282 	 * running_sum is in [0 : LOAD_AVG_MAX <<  SCHED_CAPACITY_SHIFT]
4283 	 * runnable_sum is in [0 : LOAD_AVG_MAX]
4284 	 */
4285 	running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
4286 	runnable_sum = max(runnable_sum, running_sum);
4287 
4288 	load_sum = se_weight(se) * runnable_sum;
4289 	load_avg = div_u64(load_sum, divider);
4290 
4291 	delta_avg = load_avg - se->avg.load_avg;
4292 	if (!delta_avg)
4293 		return;
4294 
4295 	delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
4296 
4297 	se->avg.load_sum = runnable_sum;
4298 	se->avg.load_avg = load_avg;
4299 	add_positive(&cfs_rq->avg.load_avg, delta_avg);
4300 	add_positive(&cfs_rq->avg.load_sum, delta_sum);
4301 	/* See update_cfs_rq_load_avg() */
4302 	cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
4303 					  cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
4304 }
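/*
 * E.g. on the removal path above, with a (scaled-down) grq->load.weight of
 * 3 and grq->avg.load_sum of 30000, each task is assumed equally runnable:
 * load_sum = 30000 / 3 = 10000, and runnable_sum is clipped to
 * min(se->avg.load_sum, 10000) so the group entity is never inflated.
 */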
4305 
4306 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
4307 {
4308 	cfs_rq->propagate = 1;
4309 	cfs_rq->prop_runnable_sum += runnable_sum;
4310 }
4311 
4312 /* Update task and its cfs_rq load average */
4313 static inline int propagate_entity_load_avg(struct sched_entity *se)
4314 {
4315 	struct cfs_rq *cfs_rq, *gcfs_rq;
4316 
4317 	if (entity_is_task(se))
4318 		return 0;
4319 
4320 	gcfs_rq = group_cfs_rq(se);
4321 	if (!gcfs_rq->propagate)
4322 		return 0;
4323 
4324 	gcfs_rq->propagate = 0;
4325 
4326 	cfs_rq = cfs_rq_of(se);
4327 
4328 	add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
4329 
4330 	update_tg_cfs_util(cfs_rq, se, gcfs_rq);
4331 	update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
4332 	update_tg_cfs_load(cfs_rq, se, gcfs_rq);
4333 
4334 	trace_pelt_cfs_tp(cfs_rq);
4335 	trace_pelt_se_tp(se);
4336 
4337 	return 1;
4338 }
4339 
4340 /*
4341  * Check if we need to update the load and the utilization of a blocked
4342  * group_entity:
4343  */
4344 static inline bool skip_blocked_update(struct sched_entity *se)
4345 {
4346 	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
4347 
4348 	/*
4349 	 * If the sched_entity still has non-zero load or utilization, we have
4350 	 * to decay it:
4351 	 */
4352 	if (se->avg.load_avg || se->avg.util_avg)
4353 		return false;
4354 
4355 	/*
4356 	 * If there is a pending propagation, we have to update the load and
4357 	 * the utilization of the sched_entity:
4358 	 */
4359 	if (gcfs_rq->propagate)
4360 		return false;
4361 
4362 	/*
4363 	 * Otherwise, the load and the utilization of the sched_entity are
4364 	 * already zero and there is no pending propagation, so it will be a
4365 	 * waste of time to try to decay it:
4366 	 */
4367 	return true;
4368 }
4369 
4370 #else /* CONFIG_FAIR_GROUP_SCHED */
4371 
4372 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {}
4373 
4374 static inline int propagate_entity_load_avg(struct sched_entity *se)
4375 {
4376 	return 0;
4377 }
4378 
4379 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {}
4380 
4381 #endif /* CONFIG_FAIR_GROUP_SCHED */
4382 
4383 #ifdef CONFIG_NO_HZ_COMMON
4384 static inline void migrate_se_pelt_lag(struct sched_entity *se)
4385 {
4386 	u64 throttled = 0, now, lut;
4387 	struct cfs_rq *cfs_rq;
4388 	struct rq *rq;
4389 	bool is_idle;
4390 
4391 	if (load_avg_is_decayed(&se->avg))
4392 		return;
4393 
4394 	cfs_rq = cfs_rq_of(se);
4395 	rq = rq_of(cfs_rq);
4396 
4397 	rcu_read_lock();
4398 	is_idle = is_idle_task(rcu_dereference(rq->curr));
4399 	rcu_read_unlock();
4400 
4401 	/*
4402 	 * The lag estimation comes with a cost we don't want to pay all the
4403 	 * time. Hence, limiting to the case where the source CPU is idle and
4404 	 * we know we are at the greatest risk to have an outdated clock.
4405 	 */
4406 	if (!is_idle)
4407 		return;
4408 
4409 	/*
4410 	 * Estimated "now" is: last_update_time + cfs_idle_lag + rq_idle_lag, where:
4411 	 *
4412 	 *   last_update_time (the cfs_rq's last_update_time)
4413 	 *	= cfs_rq_clock_pelt()@cfs_rq_idle
4414 	 *      = rq_clock_pelt()@cfs_rq_idle
4415 	 *        - cfs->throttled_clock_pelt_time@cfs_rq_idle
4416 	 *
4417 	 *   cfs_idle_lag (delta between rq's update and cfs_rq's update)
4418 	 *      = rq_clock_pelt()@rq_idle - rq_clock_pelt()@cfs_rq_idle
4419 	 *
4420 	 *   rq_idle_lag (delta between now and rq's update)
4421 	 *      = sched_clock_cpu() - rq_clock()@rq_idle
4422 	 *
4423 	 * We can then write:
4424 	 *
4425 	 *    now = rq_clock_pelt()@rq_idle - cfs->throttled_clock_pelt_time +
4426 	 *          sched_clock_cpu() - rq_clock()@rq_idle
4427 	 * Where:
4428 	 *      rq_clock_pelt()@rq_idle is rq->clock_pelt_idle
4429 	 *      rq_clock()@rq_idle      is rq->clock_idle
4430 	 *      cfs->throttled_clock_pelt_time@cfs_rq_idle
4431 	 *                              is cfs_rq->throttled_pelt_idle
4432 	 */
4433 
4434 #ifdef CONFIG_CFS_BANDWIDTH
4435 	throttled = u64_u32_load(cfs_rq->throttled_pelt_idle);
4436 	/* The clock has been stopped for throttling */
4437 	if (throttled == U64_MAX)
4438 		return;
4439 #endif
4440 	now = u64_u32_load(rq->clock_pelt_idle);
4441 	/*
4442 	 * Paired with _update_idle_rq_clock_pelt(). It ensures that, in the
4443 	 * worst case, we observe the old clock_pelt_idle value together with
4444 	 * the new clock_idle, which leads to an underestimation. The opposite
4445 	 * would lead to an overestimation.
4446 	 */
4447 	smp_rmb();
4448 	lut = cfs_rq_last_update_time(cfs_rq);
4449 
4450 	now -= throttled;
4451 	if (now < lut)
4452 		/*
4453 		 * cfs_rq->avg.last_update_time is more recent than our
4454 		 * estimation, let's use it.
4455 		 */
4456 		now = lut;
4457 	else
4458 		now += sched_clock_cpu(cpu_of(rq)) - u64_u32_load(rq->clock_idle);
4459 
4460 	__update_load_avg_blocked_se(now, se);
4461 }
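/*
 * Putting made-up numbers on the estimate above: with clock_pelt_idle =
 * 1000us, throttled = 100us, sched_clock_cpu() = 5000us and clock_idle =
 * 4000us:
 *
 *	now = (1000 - 100) + (5000 - 4000) = 1900us
 *
 * unless the cfs_rq's last_update_time is more recent, in which case it
 * is used directly.
 */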
4462 #else
4463 static void migrate_se_pelt_lag(struct sched_entity *se) {}
4464 #endif
4465 
4466 /**
4467  * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
4468  * @now: current time, as per cfs_rq_clock_pelt()
4469  * @cfs_rq: cfs_rq to update
4470  *
4471  * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
4472  * avg. The immediate corollary is that all (fair) tasks must be attached.
4473  *
4474  * cfs_rq->avg is used for task_h_load() and update_cfs_group() for example.
4475  *
4476  * Return: true if the load decayed or we removed load.
4477  *
4478  * Since both these conditions indicate a changed cfs_rq->avg.load we should
4479  * call update_tg_load_avg() when this function returns true.
4480  */
4481 static inline int
4482 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
4483 {
4484 	unsigned long removed_load = 0, removed_util = 0, removed_runnable = 0;
4485 	struct sched_avg *sa = &cfs_rq->avg;
4486 	int decayed = 0;
4487 
4488 	if (cfs_rq->removed.nr) {
4489 		unsigned long r;
4490 		u32 divider = get_pelt_divider(&cfs_rq->avg);
4491 
4492 		raw_spin_lock(&cfs_rq->removed.lock);
4493 		swap(cfs_rq->removed.util_avg, removed_util);
4494 		swap(cfs_rq->removed.load_avg, removed_load);
4495 		swap(cfs_rq->removed.runnable_avg, removed_runnable);
4496 		cfs_rq->removed.nr = 0;
4497 		raw_spin_unlock(&cfs_rq->removed.lock);
4498 
4499 		r = removed_load;
4500 		sub_positive(&sa->load_avg, r);
4501 		sub_positive(&sa->load_sum, r * divider);
4502 		/* See sa->util_sum below */
4503 		sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER);
4504 
4505 		r = removed_util;
4506 		sub_positive(&sa->util_avg, r);
4507 		sub_positive(&sa->util_sum, r * divider);
4508 		/*
4509 		 * Because of rounding, se->util_sum might end up being +1 more than
4510 		 * cfs->util_sum. Although this is not a problem by itself, detaching
4511 		 * a lot of tasks with the rounding problem between 2 updates of
4512 		 * util_avg (~1ms) can make cfs->util_sum become null whereas
4513 		 * cfs->util_avg is not.
4514 		 * Check that util_sum is still above its lower bound for the new
4515 		 * util_avg. Given that period_contrib might have moved since the last
4516 		 * sync, we are only sure that util_sum must be above or equal to
4517 		 *    util_avg * minimum possible divider
4518 		 */
4519 		sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);
4520 
4521 		r = removed_runnable;
4522 		sub_positive(&sa->runnable_avg, r);
4523 		sub_positive(&sa->runnable_sum, r * divider);
4524 		/* See sa->util_sum above */
4525 		sa->runnable_sum = max_t(u32, sa->runnable_sum,
4526 					      sa->runnable_avg * PELT_MIN_DIVIDER);
4527 
4528 		/*
4529 		 * removed_runnable is the unweighted version of removed_load so we
4530 		 * can use it to estimate removed_load_sum.
4531 		 */
4532 		add_tg_cfs_propagate(cfs_rq,
4533 			-(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT);
4534 
4535 		decayed = 1;
4536 	}
4537 
4538 	decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
4539 	u64_u32_store_copy(sa->last_update_time,
4540 			   cfs_rq->last_update_time_copy,
4541 			   sa->last_update_time);
4542 	return decayed;
4543 }
4544 
4545 /**
4546  * attach_entity_load_avg - attach this entity to its cfs_rq load avg
4547  * @cfs_rq: cfs_rq to attach to
4548  * @se: sched_entity to attach
4549  *
4550  * Must call update_cfs_rq_load_avg() before this, since we rely on
4551  * cfs_rq->avg.last_update_time being current.
4552  */
4553 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
4554 {
4555 	/*
4556 	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4557 	 * See ___update_load_avg() for details.
4558 	 */
4559 	u32 divider = get_pelt_divider(&cfs_rq->avg);
4560 
4561 	/*
4562 	 * When we attach the @se to the @cfs_rq, we must align the decay
4563 	 * window because without that, really weird and wonderful things can
4564 	 * happen.
4565 	 *
4566 	 * XXX illustrate
4567 	 */
4568 	se->avg.last_update_time = cfs_rq->avg.last_update_time;
4569 	se->avg.period_contrib = cfs_rq->avg.period_contrib;
4570 
4571 	/*
4572 	 * Hell(o) Nasty stuff.. we need to recompute _sum based on the new
4573 	 * period_contrib. This isn't strictly correct, but since we're
4574 	 * entirely outside of the PELT hierarchy, nobody cares if we truncate
4575 	 * _sum a little.
4576 	 */
4577 	se->avg.util_sum = se->avg.util_avg * divider;
4578 
4579 	se->avg.runnable_sum = se->avg.runnable_avg * divider;
4580 
4581 	se->avg.load_sum = se->avg.load_avg * divider;
4582 	if (se_weight(se) < se->avg.load_sum)
4583 		se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
4584 	else
4585 		se->avg.load_sum = 1;
4586 
4587 	trace_android_rvh_attach_entity_load_avg(cfs_rq, se);
4588 
4589 	enqueue_load_avg(cfs_rq, se);
4590 	cfs_rq->avg.util_avg += se->avg.util_avg;
4591 	cfs_rq->avg.util_sum += se->avg.util_sum;
4592 	cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
4593 	cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
4594 
4595 	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
4596 
4597 	cfs_rq_util_change(cfs_rq, 0);
4598 
4599 	trace_pelt_cfs_tp(cfs_rq);
4600 }
4601 
4602 /**
4603  * detach_entity_load_avg - detach this entity from its cfs_rq load avg
4604  * @cfs_rq: cfs_rq to detach from
4605  * @se: sched_entity to detach
4606  *
4607  * Must call update_cfs_rq_load_avg() before this, since we rely on
4608  * cfs_rq->avg.last_update_time being current.
4609  */
4610 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
4611 {
4612 	trace_android_rvh_detach_entity_load_avg(cfs_rq, se);
4613 
4614 	dequeue_load_avg(cfs_rq, se);
4615 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
4616 	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
4617 	/* See update_cfs_rq_load_avg() */
4618 	cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
4619 					  cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
4620 
4621 	sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
4622 	sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
4623 	/* See update_cfs_rq_load_avg() */
4624 	cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
4625 					      cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
4626 
4627 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
4628 
4629 	cfs_rq_util_change(cfs_rq, 0);
4630 
4631 	trace_pelt_cfs_tp(cfs_rq);
4632 }
4633 
4634 /*
4635  * Optional action to be done while updating the load average
4636  */
4637 #define UPDATE_TG	0x1
4638 #define SKIP_AGE_LOAD	0x2
4639 #define DO_ATTACH	0x4
4640 #define DO_DETACH	0x8
4641 
4642 /* Update task and its cfs_rq load average */
4643 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4644 {
4645 	u64 now = cfs_rq_clock_pelt(cfs_rq);
4646 	int decayed;
4647 
4648 	/*
4649 	 * Track task load average for carrying it to new CPU after migration, and
4650 	 * track group sched_entity load average for task_h_load calc in migration
4651 	 */
4652 	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
4653 		__update_load_avg_se(now, cfs_rq, se);
4654 
4655 	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
4656 	decayed |= propagate_entity_load_avg(se);
4657 
4658 	trace_android_rvh_update_load_avg(now, cfs_rq, se);
4659 
4660 	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
4661 
4662 		/*
4663 		 * DO_ATTACH means we're here from enqueue_entity().
4664 		 * !last_update_time means we've passed through
4665 		 * migrate_task_rq_fair() indicating we migrated.
4666 		 *
4667 		 * IOW we're enqueueing a task on a new CPU.
4668 		 */
4669 		attach_entity_load_avg(cfs_rq, se);
4670 		update_tg_load_avg(cfs_rq);
4671 
4672 	} else if (flags & DO_DETACH) {
4673 		/*
4674 		 * DO_DETACH means we're here from dequeue_entity()
4675 		 * and we are migrating task out of the CPU.
4676 		 */
4677 		detach_entity_load_avg(cfs_rq, se);
4678 		update_tg_load_avg(cfs_rq);
4679 	} else if (decayed) {
4680 		cfs_rq_util_change(cfs_rq, 0);
4681 
4682 		if (flags & UPDATE_TG)
4683 			update_tg_load_avg(cfs_rq);
4684 	}
4685 }
4686 
4687 /*
4688  * Synchronize entity load avg of dequeued entity without locking
4689  * the previous rq.
4690  */
4691 static void sync_entity_load_avg(struct sched_entity *se)
4692 {
4693 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
4694 	u64 last_update_time;
4695 
4696 	last_update_time = cfs_rq_last_update_time(cfs_rq);
4697 	__update_load_avg_blocked_se(last_update_time, se);
4698 }
4699 
4700 /*
4701  * Task first catches up with cfs_rq, and then subtracts
4702  * itself from the cfs_rq (task must be off the queue now).
4703  */
4704 static void remove_entity_load_avg(struct sched_entity *se)
4705 {
4706 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
4707 	unsigned long flags;
4708 
4709 	/*
4710 	 * tasks cannot exit without having gone through wake_up_new_task() ->
4711 	 * enqueue_task_fair() which will have added things to the cfs_rq,
4712 	 * so we can remove unconditionally.
4713 	 */
4714 
4715 	sync_entity_load_avg(se);
4716 
4717 	trace_android_rvh_remove_entity_load_avg(cfs_rq, se);
4718 
4719 	raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
4720 	++cfs_rq->removed.nr;
4721 	cfs_rq->removed.util_avg	+= se->avg.util_avg;
4722 	cfs_rq->removed.load_avg	+= se->avg.load_avg;
4723 	cfs_rq->removed.runnable_avg	+= se->avg.runnable_avg;
4724 	raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
4725 }
4726 
4727 static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq)
4728 {
4729 	return cfs_rq->avg.runnable_avg;
4730 }
4731 
4732 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
4733 {
4734 	return cfs_rq->avg.load_avg;
4735 }
4736 
4737 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
4738 
4739 static inline unsigned long task_util(struct task_struct *p)
4740 {
4741 	return READ_ONCE(p->se.avg.util_avg);
4742 }
4743 
4744 static inline unsigned long task_runnable(struct task_struct *p)
4745 {
4746 	return READ_ONCE(p->se.avg.runnable_avg);
4747 }
4748 
4749 static inline unsigned long _task_util_est(struct task_struct *p)
4750 {
4751 	return READ_ONCE(p->se.avg.util_est) & ~UTIL_AVG_UNCHANGED;
4752 }
4753 
4754 static inline unsigned long task_util_est(struct task_struct *p)
4755 {
4756 	return max(task_util(p), _task_util_est(p));
4757 }
4758 
4759 static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
4760 				    struct task_struct *p)
4761 {
4762 	unsigned int enqueued;
4763 
4764 	if (!sched_feat(UTIL_EST))
4765 		return;
4766 
4767 	/* Update root cfs_rq's estimated utilization */
4768 	enqueued  = cfs_rq->avg.util_est;
4769 	enqueued += _task_util_est(p);
4770 	WRITE_ONCE(cfs_rq->avg.util_est, enqueued);
4771 
4772 	trace_sched_util_est_cfs_tp(cfs_rq);
4773 }
4774 
4775 static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
4776 				    struct task_struct *p)
4777 {
4778 	unsigned int enqueued;
4779 
4780 	if (!sched_feat(UTIL_EST))
4781 		return;
4782 
4783 	/* Update root cfs_rq's estimated utilization */
4784 	enqueued  = cfs_rq->avg.util_est;
4785 	enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
4786 	WRITE_ONCE(cfs_rq->avg.util_est, enqueued);
4787 
4788 	trace_sched_util_est_cfs_tp(cfs_rq);
4789 }
4790 
4791 #define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
4792 
4793 static inline void util_est_update(struct cfs_rq *cfs_rq,
4794 				   struct task_struct *p,
4795 				   bool task_sleep)
4796 {
4797 	unsigned int ewma, dequeued, last_ewma_diff;
4798 	int ret = 0;
4799 
4800 	trace_android_rvh_util_est_update(cfs_rq, p, task_sleep, &ret);
4801 	if (ret)
4802 		return;
4803 
4804 	if (!sched_feat(UTIL_EST))
4805 		return;
4806 
4807 	/*
4808 	 * Skip update of task's estimated utilization when the task has not
4809 	 * yet completed an activation, e.g. being migrated.
4810 	 */
4811 	if (!task_sleep)
4812 		return;
4813 
4814 	/* Get current estimate of utilization */
4815 	ewma = READ_ONCE(p->se.avg.util_est);
4816 
4817 	/*
4818 	 * If the PELT values haven't changed since enqueue time,
4819 	 * skip the util_est update.
4820 	 */
4821 	if (ewma & UTIL_AVG_UNCHANGED)
4822 		return;
4823 
4824 	/* Get utilization at dequeue */
4825 	dequeued = task_util(p);
4826 
4827 	/*
4828 	 * Reset EWMA on utilization increases, the moving average is used only
4829 	 * to smooth utilization decreases.
4830 	 */
4831 	if (ewma <= dequeued) {
4832 		ewma = dequeued;
4833 		goto done;
4834 	}
4835 
4836 	/*
4837 	 * Skip update of the task's estimated utilization when it is already
4838 	 * within ~1% of its last activation value.
4839 	 */
4840 	last_ewma_diff = ewma - dequeued;
4841 	if (last_ewma_diff < UTIL_EST_MARGIN)
4842 		goto done;
4843 
4844 	/*
4845 	 * To avoid overestimation of actual task utilization, skip updates if
4846 	 * we cannot guarantee there is idle time on this CPU.
4847 	 */
4848 	if (dequeued > arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq))))
4849 		return;
4850 
4851 	/*
4852 	 * To avoid underestimating task utilization, skip updates of the EWMA if
4853 	 * we cannot guarantee that the thread got all the CPU time it wanted.
4854 	 */
4855 	if ((dequeued + UTIL_EST_MARGIN) < task_runnable(p))
4856 		goto done;
4857 
4858 
4859 	/*
4860 	 * Update Task's estimated utilization
4861 	 *
4862 	 * When *p completes an activation we can consolidate another sample
4863 	 * of the task size. This is done by using this value to update the
4864 	 * Exponential Weighted Moving Average (EWMA):
4865 	 *
4866 	 *  ewma(t) = w *  task_util(p) + (1-w) * ewma(t-1)
4867 	 *          = w *  task_util(p) +         ewma(t-1)  - w * ewma(t-1)
4868 	 *          = w * (task_util(p) -         ewma(t-1)) +     ewma(t-1)
4869 	 *          = w * (      -last_ewma_diff           ) +     ewma(t-1)
4870 	 *          = w * (-last_ewma_diff +  ewma(t-1) / w)
4871 	 *
4872 	 * Where 'w' is the weight of new samples, which is configured to be
4873 	 * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT)
4874 	 */
4875 	ewma <<= UTIL_EST_WEIGHT_SHIFT;
4876 	ewma  -= last_ewma_diff;
4877 	ewma >>= UTIL_EST_WEIGHT_SHIFT;
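	/*
	 * E.g. with ewma(t-1) = 200 and dequeued = 100 (last_ewma_diff = 100):
	 *
	 *	(200 << 2) = 800;  800 - 100 = 700;  700 >> 2 = 175
	 *
	 * which matches 0.25 * 100 + 0.75 * 200 = 175.
	 */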
4878 done:
4879 	ewma |= UTIL_AVG_UNCHANGED;
4880 	WRITE_ONCE(p->se.avg.util_est, ewma);
4881 
4882 	trace_sched_util_est_se_tp(&p->se);
4883 }
4884 
4885 static inline int util_fits_cpu(unsigned long util,
4886 				unsigned long uclamp_min,
4887 				unsigned long uclamp_max,
4888 				int cpu)
4889 {
4890 	unsigned long capacity_orig, capacity_orig_thermal;
4891 	unsigned long capacity = capacity_of(cpu);
4892 	bool fits, uclamp_max_fits, done = false;
4893 
4894 	trace_android_rvh_util_fits_cpu(util, uclamp_min, uclamp_max, cpu, &fits, &done);
4895 
4896 	if (done)
4897 		return fits;
4898 
4899 	/*
4900 	 * Check if the real util fits without any uclamp boost/cap applied.
4901 	 */
4902 	fits = fits_capacity(util, capacity);
4903 
4904 	if (!uclamp_is_used())
4905 		return fits;
4906 
4907 	/*
4908 	 * We must use capacity_orig_of() for comparing against uclamp_min and
4909 	 * uclamp_max. We only care about capacity pressure (by using
4910 	 * capacity_of()) for comparing against the real util.
4911 	 *
4912 	 * If a task is boosted to 1024 for example, we don't want a tiny
4913 	 * pressure to skew the check whether it fits a CPU or not.
4914 	 *
4915 	 * Similarly if a task is capped to capacity_orig_of(little_cpu), it
4916 	 * should fit a little cpu even if there's some pressure.
4917 	 *
4918 	 * Only exception is for thermal pressure since it has a direct impact
4919 	 * on available OPP of the system.
4920 	 *
4921 	 * We honour it for uclamp_min only as a drop in performance level
4922 	 * could result in not getting the requested minimum performance level.
4923 	 *
4924 	 * For uclamp_max, we can tolerate a drop in performance level as the
4925 	 * goal is to cap the task. So it's okay if it's getting less.
4926 	 */
4927 	capacity_orig = capacity_orig_of(cpu);
4928 	capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
4929 
4930 	/*
4931 	 * We want to force a task to fit a cpu as implied by uclamp_max.
4932 	 * But we do have some corner cases to cater for..
4933 	 *
4934 	 *
4935 	 *                                 C=z
4936 	 *   |                             ___
4937 	 *   |                  C=y       |   |
4938 	 *   |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _  uclamp_max
4939 	 *   |      C=x        |   |      |   |
4940 	 *   |      ___        |   |      |   |
4941 	 *   |     |   |       |   |      |   |    (util somewhere in this region)
4942 	 *   |     |   |       |   |      |   |
4943 	 *   |     |   |       |   |      |   |
4944 	 *   +----------------------------------------
4945 	 *         cpu0        cpu1       cpu2
4946 	 *
4947 	 *   In the above example if a task is capped to a specific performance
4948 	 *   point, y, then when:
4949 	 *
4950 	 *   * util = 80% of x then it does not fit on cpu0 and should migrate
4951 	 *     to cpu1
4952 	 *   * util = 80% of y then it is forced to fit on cpu1 to honour
4953 	 *     uclamp_max request.
4954 	 *
4955 	 *   which is what we're enforcing here. A task always fits if
4956 	 *   uclamp_max <= capacity_orig. But when uclamp_max > capacity_orig,
4957 	 *   the normal upmigration rules still apply.
4958 	 *
4959 	 *   The only exception is when we are at max capacity; then we need
4960 	 *   to be careful not to block the overutilized state, because:
4961 	 *
4962 	 *     1. There's no concept of capping at max_capacity! We can't go
4963 	 *        beyond this performance level anyway.
4964 	 *     2. The system is saturated when we're operating near max
4965 	 *        capacity, so it doesn't make sense to block overutilized.
4966 	 */
4967 	uclamp_max_fits = (capacity_orig == SCHED_CAPACITY_SCALE) && (uclamp_max == SCHED_CAPACITY_SCALE);
4968 	uclamp_max_fits = !uclamp_max_fits && (uclamp_max <= capacity_orig);
4969 	fits = fits || uclamp_max_fits;
4970 
4971 	/*
4972 	 *
4973 	 *                                 C=z
4974 	 *   |                             ___       (region a, capped, util >= uclamp_max)
4975 	 *   |                  C=y       |   |
4976 	 *   |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max
4977 	 *   |      C=x        |   |      |   |
4978 	 *   |      ___        |   |      |   |      (region b, uclamp_min <= util <= uclamp_max)
4979 	 *   |_ _ _|_ _|_ _ _ _| _ | _ _ _| _ | _ _ _ _ _ uclamp_min
4980 	 *   |     |   |       |   |      |   |
4981 	 *   |     |   |       |   |      |   |      (region c, boosted, util < uclamp_min)
4982 	 *   +----------------------------------------
4983 	 *         cpu0        cpu1       cpu2
4984 	 *
4985 	 * a) If util > uclamp_max, then we're capped, we don't care about
4986 	 *    actual fitness value here. We only care if uclamp_max fits
4987 	 *    capacity without taking margin/pressure into account.
4988 	 *    See comment above.
4989 	 *
4990 	 * b) If uclamp_min <= util <= uclamp_max, then the normal
4991 	 *    fits_capacity() rules apply, except that we must also enforce
4992 	 *    that we remain within uclamp_max; see the comment above.
4993 	 *
4994 	 * c) If util < uclamp_min, then we are boosted. Same as (b), but we
4995 	 *    additionally need to check that the boosted value fits the CPU
4996 	 *    without taking margin/pressure into account.
4997 	 *
4998 	 * Cases (a) and (b) are handled in the 'fits' variable already. We
4999 	 * just need to consider an extra check for case (c) after ensuring we
5000 	 * handle the case uclamp_min > uclamp_max.
5001 	 */
5002 	uclamp_min = min(uclamp_min, uclamp_max);
5003 	if (fits && (util < uclamp_min) && (uclamp_min > capacity_orig_thermal))
5004 		return -1;
5005 
5006 	return fits;
5007 }
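/*
 * Illustrative, hedged sketch (not part of this file; guarded out of the
 * build): a standalone model of the tri-state decision above. Names such
 * as util_fits_cpu_model() are invented for the demo; fits_capacity()
 * mirrors the kernel's ~80% margin, and the real-util check is simplified
 * to use capacity_orig rather than the pressure-adjusted capacity_of().
 */
#if 0
#include <stdio.h>
#include <stdbool.h>

#define SCHED_CAPACITY_SCALE	1024
#define fits_capacity(cap, max)	((cap) * 1280 < (max) * 1024)

static int util_fits_cpu_model(unsigned long util, unsigned long uclamp_min,
			       unsigned long uclamp_max,
			       unsigned long capacity_orig,
			       unsigned long thermal_pressure)
{
	bool fits = fits_capacity(util, capacity_orig);
	bool max_cap = (capacity_orig == SCHED_CAPACITY_SCALE) &&
		       (uclamp_max == SCHED_CAPACITY_SCALE);

	/* a capped task fits if uclamp_max fits, except at max capacity */
	fits = fits || (!max_cap && uclamp_max <= capacity_orig);

	uclamp_min = uclamp_min < uclamp_max ? uclamp_min : uclamp_max;
	if (fits && util < uclamp_min &&
	    uclamp_min > capacity_orig - thermal_pressure)
		return -1;	/* fits, but the boost can't be honoured */

	return fits;
}

int main(void)
{
	/* boosted task on a thermally limited big CPU -> prints -1 */
	printf("%d\n", util_fits_cpu_model(100, 900, 1024, 1024, 200));
	return 0;
}
#endif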
5008 
5009 static inline int task_fits_cpu(struct task_struct *p, int cpu)
5010 {
5011 	unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN);
5012 	unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX);
5013 	unsigned long util = task_util_est(p);
5014 	/*
5015 	 * Return true only if the cpu fully fits the task requirements, which
5016 	 * include the utilization but also the performance hints.
5017 	 */
5018 	return (util_fits_cpu(util, uclamp_min, uclamp_max, cpu) > 0);
5019 }
5020 
5021 static inline int is_misfit_task(struct task_struct *p, struct rq *rq,
5022 				 misfit_reason_t *reason)
5023 {
5024 	if (!p || p->nr_cpus_allowed == 1)
5025 		return 0;
5026 
5027 	if (task_fits_cpu(p, cpu_of(rq)))
5028 		return 0;
5029 
5030 	if (reason)
5031 		*reason = MISFIT_PERF;
5032 
5033 	return 1;
5034 }
5035 
5036 inline void update_misfit_status(struct task_struct *p, struct rq *rq)
5037 {
5038 	bool need_update = true;
5039 	misfit_reason_t reason;
5040 
5041 	trace_android_rvh_update_misfit_status(p, rq, &need_update);
5042 	if (!sched_asym_cpucap_active() || !need_update)
5043 		return;
5044 
5045 	if (!is_misfit_task(p, rq, &reason)) {
5046 		rq->misfit_task_load = 0;
5047 		rq->misfit_reason = -1;
5048 		return;
5049 	}
5050 
5051 	/*
5052 	 * Make sure that misfit_task_load will not be null even if
5053 	 * task_h_load() returns 0.
5054 	 */
5055 	rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
5056 	rq->misfit_reason = reason;
5057 }
5058 EXPORT_SYMBOL_GPL(update_misfit_status);
5059 
5060 #else /* CONFIG_SMP */
5061 
5062 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
5063 {
5064 	return !cfs_rq->nr_running;
5065 }
5066 
5067 #define UPDATE_TG	0x0
5068 #define SKIP_AGE_LOAD	0x0
5069 #define DO_ATTACH	0x0
5070 #define DO_DETACH	0x0
5071 
5072 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
5073 {
5074 	cfs_rq_util_change(cfs_rq, 0);
5075 }
5076 
5077 static inline void remove_entity_load_avg(struct sched_entity *se) {}
5078 
5079 static inline void
5080 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
5081 static inline void
5082 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
5083 
5084 static inline int newidle_balance(struct rq *rq, struct rq_flags *rf)
5085 {
5086 	return 0;
5087 }
5088 
5089 static inline void
5090 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
5091 
5092 static inline void
5093 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
5094 
5095 static inline void
5096 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p,
5097 		bool task_sleep) {}
5098 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
5099 
5100 #endif /* CONFIG_SMP */
5101 
5102 static void
5103 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
5104 {
5105 	u64 vslice, vruntime = avg_vruntime(cfs_rq);
5106 	s64 lag = 0;
5107 
5108 	se->slice = sysctl_sched_base_slice;
5109 	vslice = calc_delta_fair(se->slice, se);
5110 
5111 	/*
5112 	 * Due to how V is constructed as the weighted average of entities,
5113 	 * adding tasks with positive lag, or removing tasks with negative lag
5114 	 * will move 'time' backwards; this can screw around with the lag of
5115 	 * other tasks.
5116 	 *
5117 	 * EEVDF: placement strategy #1 / #2
5118 	 */
5119 	if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
5120 		struct sched_entity *curr = cfs_rq->curr;
5121 		unsigned long load;
5122 
5123 		lag = se->vlag;
5124 
5125 		/*
5126 		 * If we want to place a task and preserve lag, we have to
5127 		 * consider the effect of the new entity on the weighted
5128 		 * average and compensate for this, otherwise lag can quickly
5129 		 * evaporate.
5130 		 *
5131 		 * Lag is defined as:
5132 		 *
5133 		 *   lag_i = S - s_i = w_i * (V - v_i)
5134 		 *
5135 		 * To avoid the 'w_i' term all over the place, we only track
5136 		 * the virtual lag:
5137 		 *
5138 		 *   vl_i = V - v_i <=> v_i = V - vl_i
5139 		 *
5140 		 * And we take V to be the weighted average of all v:
5141 		 *
5142 		 *   V = (\Sum w_j*v_j) / W
5143 		 *
5144 		 * Where W is: \Sum w_j
5145 		 *
5146 		 * Then, the weighted average after adding an entity with lag
5147 		 * vl_i is given by:
5148 		 *
5149 		 *   V' = (\Sum w_j*v_j + w_i*v_i) / (W + w_i)
5150 		 *      = (W*V + w_i*(V - vl_i)) / (W + w_i)
5151 		 *      = (W*V + w_i*V - w_i*vl_i) / (W + w_i)
5152 		 *      = (V*(W + w_i) - w_i*vl_i) / (W + w_i)
5153 		 *      = V - w_i*vl_i / (W + w_i)
5154 		 *
5155 		 * And the actual lag after adding an entity with vl_i is:
5156 		 *
5157 		 *   vl'_i = V' - v_i
5158 		 *         = V - w_i*vl_i / (W + w_i) - (V - vl_i)
5159 		 *         = vl_i - w_i*vl_i / (W + w_i)
5160 		 *
5161 		 * Which is strictly less than vl_i. So in order to preserve lag
5162 		 * we should inflate the lag before placement such that the
5163 		 * effective lag after placement comes out right.
5164 		 *
5165 		 * As such, invert the above relation for vl'_i to get the vl_i
5166 		 * we need to use such that the lag after placement is the lag
5167 		 * we computed before dequeue.
5168 		 *
5169 		 *   vl'_i = vl_i - w_i*vl_i / (W + w_i)
5170 		 *         = ((W + w_i)*vl_i - w_i*vl_i) / (W + w_i)
5171 		 *
5172 		 *   (W + w_i)*vl'_i = (W + w_i)*vl_i - w_i*vl_i
5173 		 *                   = W*vl_i
5174 		 *
5175 		 *   vl_i = (W + w_i)*vl'_i / W
5176 		 */
5177 		load = cfs_rq->avg_load;
5178 		if (curr && curr->on_rq)
5179 			load += scale_load_down(curr->load.weight);
5180 
5181 		lag *= load + scale_load_down(se->load.weight);
5182 		if (WARN_ON_ONCE(!load))
5183 			load = 1;
5184 		lag = div_s64(lag, load);
5185 	}
5186 
5187 	se->vruntime = vruntime - lag;
5188 
5189 	/*
5190 	 * When joining the competition, the existing tasks will be,
5191 	 * on average, halfway through their slice; as such, start tasks
5192 	 * off with half a slice to ease them into the competition.
5193 	 */
5194 	if (sched_feat(PLACE_DEADLINE_INITIAL) && (flags & ENQUEUE_INITIAL))
5195 		vslice /= 2;
5196 
5197 	/*
5198 	 * EEVDF: vd_i = ve_i + r_i/w_i
5199 	 */
5200 	se->deadline = se->vruntime + vslice;
5201 	trace_android_rvh_place_entity(cfs_rq, se, flags, &vruntime);
5202 }
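/*
 * Illustrative, hedged numeric check (not part of this file; guarded out
 * of the build) of the lag-scaling derivation above: inflating the stored
 * lag by (W + w_i)/W before placement leaves exactly the requested lag
 * once V has moved. The weights and lag below are arbitrary demo values.
 */
#if 0
#include <stdio.h>

int main(void)
{
	long W = 3072;		/* \Sum w_j of the queued entities	*/
	long w_i = 1024;	/* weight of the entity being placed	*/
	long vlag = -500;	/* desired post-placement lag (vl'_i)	*/

	/* inflate as place_entity() does: vl_i = (W + w_i) * vl'_i / W */
	long vl_i = vlag * (W + w_i) / W;

	/* adding the entity moves V by w_i*vl_i/(W + w_i); residual lag: */
	long after = vl_i - w_i * vl_i / (W + w_i);

	printf("requested %ld, got %ld\n", vlag, after);	/* -500, -500 */
	return 0;
}
#endif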
5203 
5204 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
5205 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
5206 
5207 static inline bool cfs_bandwidth_used(void);
5208 
5209 static void
5210 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
5211 {
5212 	bool curr = cfs_rq->curr == se;
5213 
5214 	/*
5215 	 * If we're the current task, we must renormalise before calling
5216 	 * update_curr().
5217 	 */
5218 	if (curr)
5219 		place_entity(cfs_rq, se, flags);
5220 
5221 	update_curr(cfs_rq);
5222 
5223 	/*
5224 	 * When enqueuing a sched_entity, we must:
5225 	 *   - Update loads to have both entity and cfs_rq synced with now.
5226 	 *   - For group_entity, update its runnable_weight to reflect the new
5227 	 *     h_nr_running of its group cfs_rq.
5228 	 *   - For group_entity, update its weight to reflect the new share of
5229 	 *     its group cfs_rq
5230 	 *   - Add its new weight to cfs_rq->load.weight
5231 	 */
5232 	update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
5233 	se_update_runnable(se);
5234 	/*
5235 	 * XXX update_load_avg() above will have attached us to the pelt sum;
5236 	 * but update_cfs_group() here will re-adjust the weight and have to
5237 	 * undo/redo all that. Seems wasteful.
5238 	 */
5239 	update_cfs_group(se);
5240 
5241 	/*
5242 	 * XXX now that the entity has been re-weighted, and its lag adjusted,
5243 	 * we can place the entity.
5244 	 */
5245 	if (!curr)
5246 		place_entity(cfs_rq, se, flags);
5247 
5248 	account_entity_enqueue(cfs_rq, se);
5249 
5250 	/* Entity has migrated, no longer consider this task hot */
5251 	if (flags & ENQUEUE_MIGRATED)
5252 		se->exec_start = 0;
5253 
5254 	check_schedstat_required();
5255 	update_stats_enqueue_fair(cfs_rq, se, flags);
5256 	if (!curr)
5257 		__enqueue_entity(cfs_rq, se);
5258 	se->on_rq = 1;
5259 
5260 	if (cfs_rq->nr_running == 1) {
5261 		check_enqueue_throttle(cfs_rq);
5262 		if (!throttled_hierarchy(cfs_rq)) {
5263 			list_add_leaf_cfs_rq(cfs_rq);
5264 		} else {
5265 #ifdef CONFIG_CFS_BANDWIDTH
5266 			struct rq *rq = rq_of(cfs_rq);
5267 
5268 			if (cfs_rq_throttled(cfs_rq) && !cfs_rq->throttled_clock)
5269 				cfs_rq->throttled_clock = rq_clock(rq);
5270 			if (!cfs_rq->throttled_clock_self)
5271 				cfs_rq->throttled_clock_self = rq_clock(rq);
5272 #endif
5273 		}
5274 	}
5275 }
5276 
5277 static void __clear_buddies_next(struct sched_entity *se)
5278 {
5279 	for_each_sched_entity(se) {
5280 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
5281 		if (cfs_rq->next != se)
5282 			break;
5283 
5284 		cfs_rq->next = NULL;
5285 	}
5286 }
5287 
5288 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
5289 {
5290 	if (cfs_rq->next == se)
5291 		__clear_buddies_next(se);
5292 }
5293 
5294 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
5295 
5296 static void
5297 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
5298 {
5299 	int action = UPDATE_TG;
5300 
5301 	if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))
5302 		action |= DO_DETACH;
5303 
5304 	/*
5305 	 * Update run-time statistics of the 'current'.
5306 	 */
5307 	update_curr(cfs_rq);
5308 
5309 	/*
5310 	 * When dequeuing a sched_entity, we must:
5311 	 *   - Update loads to have both entity and cfs_rq synced with now.
5312 	 *   - For group_entity, update its runnable_weight to reflect the new
5313 	 *     h_nr_running of its group cfs_rq.
5314 	 *   - Subtract its previous weight from cfs_rq->load.weight.
5315 	 *   - For group entity, update its weight to reflect the new share
5316 	 *     of its group cfs_rq.
5317 	 */
5318 	update_load_avg(cfs_rq, se, action);
5319 	se_update_runnable(se);
5320 
5321 	update_stats_dequeue_fair(cfs_rq, se, flags);
5322 
5323 	clear_buddies(cfs_rq, se);
5324 
5325 	update_entity_lag(cfs_rq, se);
5326 	if (se != cfs_rq->curr)
5327 		__dequeue_entity(cfs_rq, se);
5328 	se->on_rq = 0;
5329 	account_entity_dequeue(cfs_rq, se);
5330 
5331 	/* return excess runtime on last dequeue */
5332 	return_cfs_rq_runtime(cfs_rq);
5333 
5334 	update_cfs_group(se);
5335 
5336 	/*
5337 	 * Now advance min_vruntime if @se was the entity holding it back,
5338 	 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE; in this case we'll be
5339 	 * put back on, and if we advance min_vruntime, we'll be placed back
5340 	 * further than we started -- ie. we'll be penalized.
5341 	 */
5342 	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
5343 		update_min_vruntime(cfs_rq);
5344 
5345 	if (cfs_rq->nr_running == 0)
5346 		update_idle_cfs_rq_clock_pelt(cfs_rq);
5347 }
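/*
 * Illustrative, hedged sketch (not part of this file; guarded out of the
 * build): truth table for the min_vruntime condition above. The flag
 * values are assumed to mirror the kernel's DEQUEUE_SAVE/DEQUEUE_MOVE
 * bits; only SAVE-without-MOVE skips advancing min_vruntime.
 */
#if 0
#include <stdio.h>

#define DEQUEUE_SAVE	0x02
#define DEQUEUE_MOVE	0x04

int main(void)
{
	int cases[] = { 0, DEQUEUE_SAVE, DEQUEUE_MOVE,
			DEQUEUE_SAVE | DEQUEUE_MOVE };

	for (int i = 0; i < 4; i++) {
		int flags = cases[i];
		int advance = (flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) !=
			      DEQUEUE_SAVE;

		printf("flags=%#04x advance=%d\n", flags, advance);
	}
	return 0;	/* advance is 0 only for flags == DEQUEUE_SAVE */
}
#endif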
5348 
5349 void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
5350 {
5351 	clear_buddies(cfs_rq, se);
5352 
5353 	/* 'current' is not kept within the tree. */
5354 	if (se->on_rq) {
5355 		/*
5356 		 * Any task has to be enqueued before it gets to execute on
5357 		 * a CPU. So account for the time it spent waiting on the
5358 		 * runqueue.
5359 		 */
5360 		update_stats_wait_end_fair(cfs_rq, se);
5361 		__dequeue_entity(cfs_rq, se);
5362 		update_load_avg(cfs_rq, se, UPDATE_TG);
5363 		/*
5364 		 * HACK, stash a copy of deadline at the point of pick in vlag,
5365 		 * which isn't used until dequeue.
5366 		 */
5367 		se->vlag = se->deadline;
5368 	}
5369 
5370 	update_stats_curr_start(cfs_rq, se);
5371 	cfs_rq->curr = se;
5372 
5373 	/*
5374 	 * Track our maximum slice length, if the CPU's load is at
5375 	 * least twice that of our own weight (i.e. don't track it
5376 	 * when there are only lesser-weight tasks around):
5377 	 */
5378 	if (schedstat_enabled() &&
5379 	    rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
5380 		struct sched_statistics *stats;
5381 
5382 		stats = __schedstats_from_se(se);
5383 		__schedstat_set(stats->slice_max,
5384 				max((u64)stats->slice_max,
5385 				    se->sum_exec_runtime - se->prev_sum_exec_runtime));
5386 	}
5387 
5388 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
5389 }
5390 EXPORT_SYMBOL_GPL(set_next_entity);
5391 
5392 /*
5393  * Pick the next process, keeping these things in mind, in this order:
5394  * 1) keep things fair between processes/task groups
5395  * 2) pick the "next" process, since someone really wants that to run
5396  * 3) pick the "last" process, for cache locality
5397  * 4) do not run the "skip" process, if something else is available
5398  */
5399 static struct sched_entity *
5400 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
5401 {
5402 	/*
5403 	 * Enabling NEXT_BUDDY will affect latency but not fairness.
5404 	 */
5405 	if (sched_feat(NEXT_BUDDY) &&
5406 	    cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
5407 		return cfs_rq->next;
5408 
5409 	return pick_eevdf(cfs_rq);
5410 }
5411 
5412 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
5413 
5414 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
5415 {
5416 	/*
5417 	 * If still on the runqueue then deactivate_task()
5418 	 * was not called and update_curr() has to be done:
5419 	 */
5420 	if (prev->on_rq)
5421 		update_curr(cfs_rq);
5422 
5423 	/* throttle cfs_rqs exceeding runtime */
5424 	check_cfs_rq_runtime(cfs_rq);
5425 
5426 	if (prev->on_rq) {
5427 		update_stats_wait_start_fair(cfs_rq, prev);
5428 		/* Put 'current' back into the tree. */
5429 		__enqueue_entity(cfs_rq, prev);
5430 		/* in !on_rq case, update occurred at dequeue */
5431 		update_load_avg(cfs_rq, prev, 0);
5432 	}
5433 	cfs_rq->curr = NULL;
5434 }
5435 
5436 static void
5437 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
5438 {
5439 	/*
5440 	 * Update run-time statistics of the 'current'.
5441 	 */
5442 	update_curr(cfs_rq);
5443 
5444 	/*
5445 	 * Ensure that runnable average is periodically updated.
5446 	 */
5447 	update_load_avg(cfs_rq, curr, UPDATE_TG);
5448 	update_cfs_group(curr);
5449 
5450 #ifdef CONFIG_SCHED_HRTICK
5451 	/*
5452 	 * queued ticks are scheduled to match the slice, so don't bother
5453 	 * validating it and just reschedule.
5454 	 */
5455 	if (queued) {
5456 		resched_curr(rq_of(cfs_rq));
5457 		return;
5458 	}
5459 	/*
5460 	 * don't let the period tick interfere with the hrtick preemption
5461 	 */
5462 	if (!sched_feat(DOUBLE_TICK) &&
5463 			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
5464 		return;
5465 #endif
5466 	trace_android_rvh_entity_tick(cfs_rq, curr);
5467 }
5468 
5469 
5470 /**************************************************
5471  * CFS bandwidth control machinery
5472  */
5473 
5474 #ifdef CONFIG_CFS_BANDWIDTH
5475 
5476 #ifdef CONFIG_JUMP_LABEL
5477 static struct static_key __cfs_bandwidth_used;
5478 
5479 static inline bool cfs_bandwidth_used(void)
5480 {
5481 	return static_key_false(&__cfs_bandwidth_used);
5482 }
5483 
5484 void cfs_bandwidth_usage_inc(void)
5485 {
5486 	static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
5487 }
5488 
5489 void cfs_bandwidth_usage_dec(void)
5490 {
5491 	static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
5492 }
5493 #else /* CONFIG_JUMP_LABEL */
5494 static bool cfs_bandwidth_used(void)
5495 {
5496 	return true;
5497 }
5498 
5499 void cfs_bandwidth_usage_inc(void) {}
5500 void cfs_bandwidth_usage_dec(void) {}
5501 #endif /* CONFIG_JUMP_LABEL */
5502 
5503 /*
5504  * default period for cfs group bandwidth.
5505  * default: 0.1s, units: nanoseconds
5506  */
5507 static inline u64 default_cfs_period(void)
5508 {
5509 	return 100000000ULL;
5510 }
5511 
5512 static inline u64 sched_cfs_bandwidth_slice(void)
5513 {
5514 	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
5515 }
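/*
 * Illustrative, hedged arithmetic (not part of this file; guarded out of
 * the build): with the default 100 ms period, a demo 50 ms quota and the
 * default 5 ms bandwidth slice, up to ten per-CPU runqueues can each hold
 * one unexpired slice before the global pool is drained. The quota value
 * is a made-up example.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long period_ns = 100000000ULL;	/* 100 ms */
	unsigned long long quota_ns  =  50000000ULL;	/*  50 ms */
	unsigned long long slice_ns  =   5000000ULL;	/*   5 ms */

	printf("runnable %llu%% of each period, %llu slices per refill\n",
	       quota_ns * 100 / period_ns, quota_ns / slice_ns);
	return 0;	/* prints: runnable 50% of each period, 10 slices */
}
#endif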
5516 
5517 /*
5518  * Replenish runtime according to assigned quota. We use sched_clock_cpu
5519  * directly instead of rq->clock to avoid adding additional synchronization
5520  * around rq->lock.
5521  *
5522  * requires cfs_b->lock
5523  */
5524 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
5525 {
5526 	s64 runtime;
5527 
5528 	if (unlikely(cfs_b->quota == RUNTIME_INF))
5529 		return;
5530 
5531 	cfs_b->runtime += cfs_b->quota;
5532 	runtime = cfs_b->runtime_snap - cfs_b->runtime;
5533 	if (runtime > 0) {
5534 		cfs_b->burst_time += runtime;
5535 		cfs_b->nr_burst++;
5536 	}
5537 
5538 	cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst);
5539 	cfs_b->runtime_snap = cfs_b->runtime;
5540 }
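/*
 * Illustrative, hedged model (not part of this file; guarded out of the
 * build) of the burst accounting above: runtime_snap - runtime after the
 * refill equals (consumed - quota), so only consumption beyond one
 * quota's worth of runtime is charged to burst_time. Demo units are
 * arbitrary.
 */
#if 0
#include <stdio.h>

int main(void)
{
	long long quota = 50, burst = 20;
	long long runtime = quota + burst;	/* pool starts full	*/
	long long snap = runtime;
	long long used = 60;			/* > quota this period	*/

	runtime -= used;
	runtime += quota;			/* the refill		*/

	long long over = snap - runtime;	/* == used - quota	*/
	if (over > 0)
		printf("burst time charged: %lld\n", over);	/* 10 */

	runtime = runtime < quota + burst ? runtime : quota + burst;
	snap = runtime;
	return 0;
}
#endif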
5541 
5542 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
5543 {
5544 	return &tg->cfs_bandwidth;
5545 }
5546 
5547 /* returns 0 on failure to allocate runtime */
5548 static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b,
5549 				   struct cfs_rq *cfs_rq, u64 target_runtime)
5550 {
5551 	u64 min_amount, amount = 0;
5552 
5553 	lockdep_assert_held(&cfs_b->lock);
5554 
5555 	/* note: this is a positive sum as runtime_remaining <= 0 */
5556 	min_amount = target_runtime - cfs_rq->runtime_remaining;
5557 
5558 	if (cfs_b->quota == RUNTIME_INF)
5559 		amount = min_amount;
5560 	else {
5561 		start_cfs_bandwidth(cfs_b);
5562 
5563 		if (cfs_b->runtime > 0) {
5564 			amount = min(cfs_b->runtime, min_amount);
5565 			cfs_b->runtime -= amount;
5566 			cfs_b->idle = 0;
5567 		}
5568 	}
5569 
5570 	cfs_rq->runtime_remaining += amount;
5571 
5572 	return cfs_rq->runtime_remaining > 0;
5573 }
5574 
5575 /* returns 0 on failure to allocate runtime */
5576 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5577 {
5578 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5579 	int ret;
5580 
5581 	raw_spin_lock(&cfs_b->lock);
5582 	ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice());
5583 	raw_spin_unlock(&cfs_b->lock);
5584 
5585 	return ret;
5586 }
5587 
5588 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
5589 {
5590 	/* dock delta_exec before expiring quota (as it could span periods) */
5591 	cfs_rq->runtime_remaining -= delta_exec;
5592 
5593 	if (likely(cfs_rq->runtime_remaining > 0))
5594 		return;
5595 
5596 	if (cfs_rq->throttled)
5597 		return;
5598 	/*
5599 	 * if we're unable to extend our runtime we resched so that the active
5600 	 * hierarchy can be throttled
5601 	 */
5602 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
5603 		resched_curr(rq_of(cfs_rq));
5604 }
5605 
5606 static __always_inline
5607 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
5608 {
5609 	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
5610 		return;
5611 
5612 	__account_cfs_rq_runtime(cfs_rq, delta_exec);
5613 }
5614 
5615 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
5616 {
5617 	return cfs_bandwidth_used() && cfs_rq->throttled;
5618 }
5619 
5620 /* check whether cfs_rq, or any parent, is throttled */
5621 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
5622 {
5623 	return cfs_bandwidth_used() && cfs_rq->throttle_count;
5624 }
5625 
5626 /*
5627  * Ensure that neither of the group entities corresponding to src_cpu or
5628  * dest_cpu are members of a throttled hierarchy when performing group
5629  * load-balance operations.
5630  */
5631 static inline int throttled_lb_pair(struct task_group *tg,
5632 				    int src_cpu, int dest_cpu)
5633 {
5634 	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
5635 
5636 	src_cfs_rq = tg->cfs_rq[src_cpu];
5637 	dest_cfs_rq = tg->cfs_rq[dest_cpu];
5638 
5639 	return throttled_hierarchy(src_cfs_rq) ||
5640 	       throttled_hierarchy(dest_cfs_rq);
5641 }
5642 
5643 static int tg_unthrottle_up(struct task_group *tg, void *data)
5644 {
5645 	struct rq *rq = data;
5646 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5647 
5648 	cfs_rq->throttle_count--;
5649 	if (!cfs_rq->throttle_count) {
5650 		cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
5651 					     cfs_rq->throttled_clock_pelt;
5652 
5653 		/* Add a cfs_rq with load, or with already-running entities, to the list */
5654 		if (!cfs_rq_is_decayed(cfs_rq))
5655 			list_add_leaf_cfs_rq(cfs_rq);
5656 
5657 		if (cfs_rq->throttled_clock_self) {
5658 			u64 delta = rq_clock(rq) - cfs_rq->throttled_clock_self;
5659 
5660 			cfs_rq->throttled_clock_self = 0;
5661 
5662 			if (SCHED_WARN_ON((s64)delta < 0))
5663 				delta = 0;
5664 
5665 			cfs_rq->throttled_clock_self_time += delta;
5666 		}
5667 	}
5668 
5669 	return 0;
5670 }
5671 
5672 static int tg_throttle_down(struct task_group *tg, void *data)
5673 {
5674 	struct rq *rq = data;
5675 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5676 
5677 	/* group is entering throttled state, stop time */
5678 	if (!cfs_rq->throttle_count) {
5679 		cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
5680 		list_del_leaf_cfs_rq(cfs_rq);
5681 
5682 		SCHED_WARN_ON(cfs_rq->throttled_clock_self);
5683 		if (cfs_rq->nr_running)
5684 			cfs_rq->throttled_clock_self = rq_clock(rq);
5685 	}
5686 	cfs_rq->throttle_count++;
5687 
5688 	return 0;
5689 }
5690 
5691 static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
5692 {
5693 	struct rq *rq = rq_of(cfs_rq);
5694 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5695 	struct sched_entity *se;
5696 	long task_delta, idle_task_delta, dequeue = 1;
5697 
5698 	raw_spin_lock(&cfs_b->lock);
5699 	/* This will start the period timer if necessary */
5700 	if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) {
5701 		/*
5702 		 * We have raced with bandwidth becoming available, and if we
5703 		 * actually throttled the timer might not unthrottle us for an
5704 		 * entire period. We additionally needed to make sure that any
5705 		 * subsequent check_cfs_rq_runtime calls agree not to throttle
5706 		 * us, as we may commit to do cfs put_prev+pick_next, so we ask
5707 		 * for 1ns of runtime rather than just check cfs_b.
5708 		 */
5709 		dequeue = 0;
5710 	} else {
5711 		list_add_tail_rcu(&cfs_rq->throttled_list,
5712 				  &cfs_b->throttled_cfs_rq);
5713 	}
5714 	raw_spin_unlock(&cfs_b->lock);
5715 
5716 	if (!dequeue)
5717 		return false;  /* Throttle no longer required. */
5718 
5719 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
5720 
5721 	/* freeze hierarchy runnable averages while throttled */
5722 	rcu_read_lock();
5723 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
5724 	rcu_read_unlock();
5725 
5726 	task_delta = cfs_rq->h_nr_running;
5727 	idle_task_delta = cfs_rq->idle_h_nr_running;
5728 	for_each_sched_entity(se) {
5729 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5730 		/* throttled entity or throttle-on-deactivate */
5731 		if (!se->on_rq)
5732 			goto done;
5733 
5734 		dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
5735 
5736 		if (cfs_rq_is_idle(group_cfs_rq(se)))
5737 			idle_task_delta = cfs_rq->h_nr_running;
5738 
5739 		qcfs_rq->h_nr_running -= task_delta;
5740 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
5741 
5742 		if (qcfs_rq->load.weight) {
5743 			/* Avoid re-evaluating load for this entity: */
5744 			se = parent_entity(se);
5745 			break;
5746 		}
5747 	}
5748 
5749 	for_each_sched_entity(se) {
5750 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5751 		/* throttled entity or throttle-on-deactivate */
5752 		if (!se->on_rq)
5753 			goto done;
5754 
5755 		update_load_avg(qcfs_rq, se, 0);
5756 		se_update_runnable(se);
5757 
5758 		if (cfs_rq_is_idle(group_cfs_rq(se)))
5759 			idle_task_delta = cfs_rq->h_nr_running;
5760 
5761 		qcfs_rq->h_nr_running -= task_delta;
5762 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
5763 	}
5764 
5765 	/* At this point se is NULL and we are at root level */
5766 	sub_nr_running(rq, task_delta);
5767 
5768 done:
5769 	/*
5770 	 * Note: distribution will already see us throttled via the
5771 	 * throttled-list.  rq->lock protects completion.
5772 	 */
5773 	cfs_rq->throttled = 1;
5774 	SCHED_WARN_ON(cfs_rq->throttled_clock);
5775 	if (cfs_rq->nr_running)
5776 		cfs_rq->throttled_clock = rq_clock(rq);
5777 	return true;
5778 }
5779 
5780 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
5781 {
5782 	struct rq *rq = rq_of(cfs_rq);
5783 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5784 	struct sched_entity *se;
5785 	long task_delta, idle_task_delta;
5786 
5787 	se = cfs_rq->tg->se[cpu_of(rq)];
5788 
5789 	cfs_rq->throttled = 0;
5790 
5791 	update_rq_clock(rq);
5792 
5793 	raw_spin_lock(&cfs_b->lock);
5794 	if (cfs_rq->throttled_clock) {
5795 		cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
5796 		cfs_rq->throttled_clock = 0;
5797 	}
5798 	list_del_rcu(&cfs_rq->throttled_list);
5799 	raw_spin_unlock(&cfs_b->lock);
5800 
5801 	/* update hierarchical throttle state */
5802 	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
5803 
5804 	if (!cfs_rq->load.weight) {
5805 		if (!cfs_rq->on_list)
5806 			return;
5807 		/*
5808 		 * Nothing to run but something to decay (on_list)?
5809 		 * Complete the branch.
5810 		 */
5811 		for_each_sched_entity(se) {
5812 			if (list_add_leaf_cfs_rq(cfs_rq_of(se)))
5813 				break;
5814 		}
5815 		goto unthrottle_throttle;
5816 	}
5817 
5818 	task_delta = cfs_rq->h_nr_running;
5819 	idle_task_delta = cfs_rq->idle_h_nr_running;
5820 	for_each_sched_entity(se) {
5821 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5822 
5823 		if (se->on_rq)
5824 			break;
5825 		enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
5826 
5827 		if (cfs_rq_is_idle(group_cfs_rq(se)))
5828 			idle_task_delta = cfs_rq->h_nr_running;
5829 
5830 		qcfs_rq->h_nr_running += task_delta;
5831 		qcfs_rq->idle_h_nr_running += idle_task_delta;
5832 
5833 		/* end evaluation on encountering a throttled cfs_rq */
5834 		if (cfs_rq_throttled(qcfs_rq))
5835 			goto unthrottle_throttle;
5836 	}
5837 
5838 	for_each_sched_entity(se) {
5839 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5840 
5841 		update_load_avg(qcfs_rq, se, UPDATE_TG);
5842 		se_update_runnable(se);
5843 
5844 		if (cfs_rq_is_idle(group_cfs_rq(se)))
5845 			idle_task_delta = cfs_rq->h_nr_running;
5846 
5847 		qcfs_rq->h_nr_running += task_delta;
5848 		qcfs_rq->idle_h_nr_running += idle_task_delta;
5849 
5850 		/* end evaluation on encountering a throttled cfs_rq */
5851 		if (cfs_rq_throttled(qcfs_rq))
5852 			goto unthrottle_throttle;
5853 	}
5854 
5855 	/* At this point se is NULL and we are at root level */
5856 	add_nr_running(rq, task_delta);
5857 
5858 unthrottle_throttle:
5859 	assert_list_leaf_cfs_rq(rq);
5860 
5861 	/* Determine whether we need to wake up potentially idle CPU: */
5862 	if (rq->curr == rq->idle && rq->cfs.nr_running)
5863 		resched_curr(rq);
5864 }
5865 
5866 #ifdef CONFIG_SMP
5867 static void __cfsb_csd_unthrottle(void *arg)
5868 {
5869 	struct cfs_rq *cursor, *tmp;
5870 	struct rq *rq = arg;
5871 	struct rq_flags rf;
5872 
5873 	rq_lock(rq, &rf);
5874 
5875 	/*
5876 	 * Iterating over the list can trigger several calls to
5877 	 * update_rq_clock() in unthrottle_cfs_rq().
5878 	 * Do it once and skip the potential next ones.
5879 	 */
5880 	update_rq_clock(rq);
5881 	rq_clock_start_loop_update(rq);
5882 
5883 	/*
5884 	 * Since we hold rq lock we're safe from concurrent manipulation of
5885 	 * the CSD list. However, this RCU critical section annotates the
5886 	 * fact that we pair with sched_free_group_rcu(), so that we cannot
5887 	 * race with group being freed in the window between removing it
5888 	 * from the list and advancing to the next entry in the list.
5889 	 */
5890 	rcu_read_lock();
5891 
5892 	list_for_each_entry_safe(cursor, tmp, &rq->cfsb_csd_list,
5893 				 throttled_csd_list) {
5894 		list_del_init(&cursor->throttled_csd_list);
5895 
5896 		if (cfs_rq_throttled(cursor))
5897 			unthrottle_cfs_rq(cursor);
5898 	}
5899 
5900 	rcu_read_unlock();
5901 
5902 	rq_clock_stop_loop_update(rq);
5903 	rq_unlock(rq, &rf);
5904 }
5905 
5906 static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
5907 {
5908 	struct rq *rq = rq_of(cfs_rq);
5909 	bool first;
5910 
5911 	if (rq == this_rq()) {
5912 		unthrottle_cfs_rq(cfs_rq);
5913 		return;
5914 	}
5915 
5916 	/* Already enqueued */
5917 	if (SCHED_WARN_ON(!list_empty(&cfs_rq->throttled_csd_list)))
5918 		return;
5919 
5920 	first = list_empty(&rq->cfsb_csd_list);
5921 	list_add_tail(&cfs_rq->throttled_csd_list, &rq->cfsb_csd_list);
5922 	if (first)
5923 		smp_call_function_single_async(cpu_of(rq), &rq->cfsb_csd);
5924 }
5925 #else
5926 static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
5927 {
5928 	unthrottle_cfs_rq(cfs_rq);
5929 }
5930 #endif
5931 
5932 static void unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
5933 {
5934 	lockdep_assert_rq_held(rq_of(cfs_rq));
5935 
5936 	if (SCHED_WARN_ON(!cfs_rq_throttled(cfs_rq) ||
5937 	    cfs_rq->runtime_remaining <= 0))
5938 		return;
5939 
5940 	__unthrottle_cfs_rq_async(cfs_rq);
5941 }
5942 
5943 static bool distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
5944 {
5945 	struct cfs_rq *local_unthrottle = NULL;
5946 	int this_cpu = smp_processor_id();
5947 	u64 runtime, remaining = 1;
5948 	bool throttled = false;
5949 	struct cfs_rq *cfs_rq;
5950 	struct rq_flags rf;
5951 	struct rq *rq;
5952 
5953 	rcu_read_lock();
5954 	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
5955 				throttled_list) {
5956 		rq = rq_of(cfs_rq);
5957 
5958 		if (!remaining) {
5959 			throttled = true;
5960 			break;
5961 		}
5962 
5963 		rq_lock_irqsave(rq, &rf);
5964 		if (!cfs_rq_throttled(cfs_rq))
5965 			goto next;
5966 
5967 #ifdef CONFIG_SMP
5968 		/* Already queued for async unthrottle */
5969 		if (!list_empty(&cfs_rq->throttled_csd_list))
5970 			goto next;
5971 #endif
5972 
5973 		/* By the above checks, this should never be true */
5974 		SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
5975 
5976 		raw_spin_lock(&cfs_b->lock);
5977 		runtime = -cfs_rq->runtime_remaining + 1;
5978 		if (runtime > cfs_b->runtime)
5979 			runtime = cfs_b->runtime;
5980 		cfs_b->runtime -= runtime;
5981 		remaining = cfs_b->runtime;
5982 		raw_spin_unlock(&cfs_b->lock);
5983 
5984 		cfs_rq->runtime_remaining += runtime;
5985 
5986 		/* we check whether we're throttled above */
5987 		if (cfs_rq->runtime_remaining > 0) {
5988 			if (cpu_of(rq) != this_cpu ||
5989 			    SCHED_WARN_ON(local_unthrottle))
5990 				unthrottle_cfs_rq_async(cfs_rq);
5991 			else
5992 				local_unthrottle = cfs_rq;
5993 		} else {
5994 			throttled = true;
5995 		}
5996 
5997 next:
5998 		rq_unlock_irqrestore(rq, &rf);
5999 	}
6000 	rcu_read_unlock();
6001 
6002 	if (local_unthrottle) {
6003 		rq = cpu_rq(this_cpu);
6004 		rq_lock_irqsave(rq, &rf);
6005 		if (cfs_rq_throttled(local_unthrottle))
6006 			unthrottle_cfs_rq(local_unthrottle);
6007 		rq_unlock_irqrestore(rq, &rf);
6008 	}
6009 
6010 	return throttled;
6011 }
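/*
 * Illustrative, hedged sketch (not part of this file; guarded out of the
 * build): each throttled cfs_rq above is topped up to exactly +1 ns of
 * runtime_remaining, just enough to run again and pull a full slice for
 * itself. The deficit and pool sizes below are demo values.
 */
#if 0
#include <stdio.h>

int main(void)
{
	long long runtime_remaining = -250000;	/* current deficit, ns	*/
	long long pool = 1000000;		/* global runtime, ns	*/

	long long grant = -runtime_remaining + 1;
	if (grant > pool)
		grant = pool;
	pool -= grant;
	runtime_remaining += grant;

	/* prints: remaining=1 pool=749999 */
	printf("remaining=%lld pool=%lld\n", runtime_remaining, pool);
	return 0;
}
#endif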
6012 
6013 /*
6014  * Responsible for refilling a task_group's bandwidth and unthrottling its
6015  * cfs_rqs as appropriate. If there has been no activity within the last
6016  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
6017  * used to track this state.
6018  */
6019 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
6020 {
6021 	int throttled;
6022 
6023 	/* no need to continue the timer with no bandwidth constraint */
6024 	if (cfs_b->quota == RUNTIME_INF)
6025 		goto out_deactivate;
6026 
6027 	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
6028 	cfs_b->nr_periods += overrun;
6029 
6030 	/* Refill extra burst quota even if cfs_b->idle */
6031 	__refill_cfs_bandwidth_runtime(cfs_b);
6032 
6033 	/*
6034 	 * idle depends on !throttled (for the case of a large deficit), and if
6035 	 * we're going inactive then everything else can be deferred
6036 	 */
6037 	if (cfs_b->idle && !throttled)
6038 		goto out_deactivate;
6039 
6040 	if (!throttled) {
6041 		/* mark as potentially idle for the upcoming period */
6042 		cfs_b->idle = 1;
6043 		return 0;
6044 	}
6045 
6046 	/* account preceding periods in which throttling occurred */
6047 	cfs_b->nr_throttled += overrun;
6048 
6049 	/*
6050 	 * This check is repeated as we release cfs_b->lock while we unthrottle.
6051 	 */
6052 	while (throttled && cfs_b->runtime > 0) {
6053 		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
6054 		/* we can't nest cfs_b->lock while distributing bandwidth */
6055 		throttled = distribute_cfs_runtime(cfs_b);
6056 		raw_spin_lock_irqsave(&cfs_b->lock, flags);
6057 	}
6058 
6059 	/*
6060 	 * While we are ensured activity in the period following an
6061 	 * unthrottle, this also covers the case in which the new bandwidth is
6062 	 * insufficient to cover the existing bandwidth deficit.  (Forcing the
6063 	 * timer to remain active while there are any throttled entities.)
6064 	 */
6065 	cfs_b->idle = 0;
6066 
6067 	return 0;
6068 
6069 out_deactivate:
6070 	return 1;
6071 }
6072 
6073 /* a cfs_rq won't donate quota below this amount */
6074 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
6075 /* minimum remaining period time to redistribute slack quota */
6076 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
6077 /* how long we wait to gather additional slack before distributing */
6078 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
6079 
6080 /*
6081  * Are we near the end of the current quota period?
6082  *
6083  * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
6084  * hrtimer base being cleared by hrtimer_start. In the case of
6085  * migrate_hrtimers, base is never cleared, so we are fine.
6086  */
6087 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
6088 {
6089 	struct hrtimer *refresh_timer = &cfs_b->period_timer;
6090 	s64 remaining;
6091 
6092 	/* if the call-back is running a quota refresh is already occurring */
6093 	if (hrtimer_callback_running(refresh_timer))
6094 		return 1;
6095 
6096 	/* is a quota refresh about to occur? */
6097 	remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
6098 	if (remaining < (s64)min_expire)
6099 		return 1;
6100 
6101 	return 0;
6102 }
6103 
6104 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
6105 {
6106 	u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
6107 
6108 	/* if there's a quota refresh soon don't bother with slack */
6109 	if (runtime_refresh_within(cfs_b, min_left))
6110 		return;
6111 
6112 	/* don't push forwards an existing deferred unthrottle */
6113 	if (cfs_b->slack_started)
6114 		return;
6115 	cfs_b->slack_started = true;
6116 
6117 	hrtimer_start(&cfs_b->slack_timer,
6118 			ns_to_ktime(cfs_bandwidth_slack_period),
6119 			HRTIMER_MODE_REL);
6120 }
6121 
6122 /* we know any runtime found here is valid as update_curr() precedes return */
6123 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
6124 {
6125 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
6126 	s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
6127 
6128 	if (slack_runtime <= 0)
6129 		return;
6130 
6131 	raw_spin_lock(&cfs_b->lock);
6132 	if (cfs_b->quota != RUNTIME_INF) {
6133 		cfs_b->runtime += slack_runtime;
6134 
6135 		/* we are under rq->lock, defer unthrottling using a timer */
6136 		if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
6137 		    !list_empty(&cfs_b->throttled_cfs_rq))
6138 			start_cfs_slack_bandwidth(cfs_b);
6139 	}
6140 	raw_spin_unlock(&cfs_b->lock);
6141 
6142 	/* even if it's not valid for return we don't want to try again */
6143 	cfs_rq->runtime_remaining -= slack_runtime;
6144 }
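/*
 * Illustrative, hedged arithmetic (not part of this file; guarded out of
 * the build): a cfs_rq going idle with 3 ms of runtime left keeps
 * min_cfs_rq_runtime (1 ms) for itself and donates the remaining 2 ms
 * back to the global pool, as computed above.
 */
#if 0
#include <stdio.h>

#define NSEC_PER_MSEC	1000000LL

int main(void)
{
	long long runtime_remaining = 3 * NSEC_PER_MSEC;
	long long min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
	long long slack = runtime_remaining - min_cfs_rq_runtime;

	if (slack > 0)
		printf("donated %lld ns\n", slack);	/* 2000000 */
	return 0;
}
#endif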
6145 
6146 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
6147 {
6148 	if (!cfs_bandwidth_used())
6149 		return;
6150 
6151 	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
6152 		return;
6153 
6154 	__return_cfs_rq_runtime(cfs_rq);
6155 }
6156 
6157 /*
6158  * This is done with a timer (instead of inline with bandwidth return) since
6159  * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
6160  */
6161 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
6162 {
6163 	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
6164 	unsigned long flags;
6165 
6166 	/* confirm we're still not at a refresh boundary */
6167 	raw_spin_lock_irqsave(&cfs_b->lock, flags);
6168 	cfs_b->slack_started = false;
6169 
6170 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
6171 		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
6172 		return;
6173 	}
6174 
6175 	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
6176 		runtime = cfs_b->runtime;
6177 
6178 	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
6179 
6180 	if (!runtime)
6181 		return;
6182 
6183 	distribute_cfs_runtime(cfs_b);
6184 }
6185 
6186 /*
6187  * When a group wakes up we want to make sure that its quota is not already
6188  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
6189  * runtime, as update_curr() throttling cannot trigger until it's on-rq.
6190  */
6191 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
6192 {
6193 	if (!cfs_bandwidth_used())
6194 		return;
6195 
6196 	/* an active group must be handled by the update_curr()->put() path */
6197 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
6198 		return;
6199 
6200 	/* ensure the group is not already throttled */
6201 	if (cfs_rq_throttled(cfs_rq))
6202 		return;
6203 
6204 	/* update runtime allocation */
6205 	account_cfs_rq_runtime(cfs_rq, 0);
6206 	if (cfs_rq->runtime_remaining <= 0)
6207 		throttle_cfs_rq(cfs_rq);
6208 }
6209 
6210 static void sync_throttle(struct task_group *tg, int cpu)
6211 {
6212 	struct cfs_rq *pcfs_rq, *cfs_rq;
6213 
6214 	if (!cfs_bandwidth_used())
6215 		return;
6216 
6217 	if (!tg->parent)
6218 		return;
6219 
6220 	cfs_rq = tg->cfs_rq[cpu];
6221 	pcfs_rq = tg->parent->cfs_rq[cpu];
6222 
6223 	cfs_rq->throttle_count = pcfs_rq->throttle_count;
6224 	cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
6225 }
6226 
6227 /* conditionally throttle active cfs_rq's from put_prev_entity() */
6228 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
6229 {
6230 	if (!cfs_bandwidth_used())
6231 		return false;
6232 
6233 	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
6234 		return false;
6235 
6236 	/*
6237 	 * it's possible for a throttled entity to be forced into a running
6238 	 * state (e.g. set_curr_task); in this case we're finished.
6239 	 */
6240 	if (cfs_rq_throttled(cfs_rq))
6241 		return true;
6242 
6243 	return throttle_cfs_rq(cfs_rq);
6244 }
6245 
6246 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
6247 {
6248 	struct cfs_bandwidth *cfs_b =
6249 		container_of(timer, struct cfs_bandwidth, slack_timer);
6250 
6251 	do_sched_cfs_slack_timer(cfs_b);
6252 
6253 	return HRTIMER_NORESTART;
6254 }
6255 
6256 extern const u64 max_cfs_quota_period;
6257 
6258 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
6259 {
6260 	struct cfs_bandwidth *cfs_b =
6261 		container_of(timer, struct cfs_bandwidth, period_timer);
6262 	unsigned long flags;
6263 	int overrun;
6264 	int idle = 0;
6265 	int count = 0;
6266 
6267 	raw_spin_lock_irqsave(&cfs_b->lock, flags);
6268 	for (;;) {
6269 		overrun = hrtimer_forward_now(timer, cfs_b->period);
6270 		if (!overrun)
6271 			break;
6272 
6273 		idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
6274 
6275 		if (++count > 3) {
6276 			u64 new, old = ktime_to_ns(cfs_b->period);
6277 
6278 			/*
6279 			 * Grow period by a factor of 2 to avoid losing precision.
6280 			 * Precision loss in the quota/period ratio can cause __cfs_schedulable
6281 			 * to fail.
6282 			 */
6283 			new = old * 2;
6284 			if (new < max_cfs_quota_period) {
6285 				cfs_b->period = ns_to_ktime(new);
6286 				cfs_b->quota *= 2;
6287 				cfs_b->burst *= 2;
6288 
6289 				pr_warn_ratelimited(
6290 	"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
6291 					smp_processor_id(),
6292 					div_u64(new, NSEC_PER_USEC),
6293 					div_u64(cfs_b->quota, NSEC_PER_USEC));
6294 			} else {
6295 				pr_warn_ratelimited(
6296 	"cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
6297 					smp_processor_id(),
6298 					div_u64(old, NSEC_PER_USEC),
6299 					div_u64(cfs_b->quota, NSEC_PER_USEC));
6300 			}
6301 
6302 			/* reset count so we don't come right back in here */
6303 			count = 0;
6304 		}
6305 	}
6306 	if (idle)
6307 		cfs_b->period_active = 0;
6308 	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
6309 
6310 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
6311 }
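/*
 * Illustrative, hedged sketch (not part of this file; guarded out of the
 * build): the period-doubling above scales period and quota together, so
 * the effective bandwidth ratio is preserved while the timer fires less
 * often. Starting values are demo numbers.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long period = 100000000ULL;	/* 100 ms */
	unsigned long long quota  =  50000000ULL;	/*  50 ms */

	for (int i = 0; i < 3; i++) {
		period *= 2;
		quota  *= 2;
		/* ratio stays 0.50 at 200/400/800 ms periods */
		printf("period=%llu us quota=%llu us ratio=%.2f\n",
		       period / 1000, quota / 1000,
		       (double)quota / (double)period);
	}
	return 0;
}
#endif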
6312 
6313 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent)
6314 {
6315 	raw_spin_lock_init(&cfs_b->lock);
6316 	cfs_b->runtime = 0;
6317 	cfs_b->quota = RUNTIME_INF;
6318 	cfs_b->period = ns_to_ktime(default_cfs_period());
6319 	cfs_b->burst = 0;
6320 	cfs_b->hierarchical_quota = parent ? parent->hierarchical_quota : RUNTIME_INF;
6321 
6322 	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
6323 	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
6324 	cfs_b->period_timer.function = sched_cfs_period_timer;
6325 
6326 	/* Add a random offset so that timers interleave */
6327 	hrtimer_set_expires(&cfs_b->period_timer,
6328 			    get_random_u32_below(cfs_b->period));
6329 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6330 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
6331 	cfs_b->slack_started = false;
6332 }
6333 
6334 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
6335 {
6336 	cfs_rq->runtime_enabled = 0;
6337 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
6338 #ifdef CONFIG_SMP
6339 	INIT_LIST_HEAD(&cfs_rq->throttled_csd_list);
6340 #endif
6341 }
6342 
6343 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
6344 {
6345 	lockdep_assert_held(&cfs_b->lock);
6346 
6347 	if (cfs_b->period_active)
6348 		return;
6349 
6350 	cfs_b->period_active = 1;
6351 	hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
6352 	hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
6353 }
6354 
6355 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
6356 {
6357 	int __maybe_unused i;
6358 
6359 	/* init_cfs_bandwidth() was not called */
6360 	if (!cfs_b->throttled_cfs_rq.next)
6361 		return;
6362 
6363 	hrtimer_cancel(&cfs_b->period_timer);
6364 	hrtimer_cancel(&cfs_b->slack_timer);
6365 
6366 	/*
6367 	 * It is possible that we still have some cfs_rq's pending on a CSD
6368 	 * list, though this race is very rare. In order for this to occur, we
6369 	 * must have raced with the last task leaving the group while there
6370 	 * exist throttled cfs_rq(s), and the period_timer must have queued the
6371 	 * CSD item but the remote cpu has not yet processed it. To handle this,
6372 	 * we can simply flush all pending CSD work inline here. We're
6373 	 * guaranteed at this point that no additional cfs_rq of this group can
6374 	 * join a CSD list.
6375 	 */
6376 #ifdef CONFIG_SMP
6377 	for_each_possible_cpu(i) {
6378 		struct rq *rq = cpu_rq(i);
6379 		unsigned long flags;
6380 
6381 		if (list_empty(&rq->cfsb_csd_list))
6382 			continue;
6383 
6384 		local_irq_save(flags);
6385 		__cfsb_csd_unthrottle(rq);
6386 		local_irq_restore(flags);
6387 	}
6388 #endif
6389 }
6390 
6391 /*
6392  * Both these CPU hotplug callbacks race against unregister_fair_sched_group()
6393  *
6394  * The race is harmless, since modifying bandwidth settings of unhooked group
6395  * bits doesn't do much.
6396  */
6397 
6398 /* cpu online callback */
6399 static void __maybe_unused update_runtime_enabled(struct rq *rq)
6400 {
6401 	struct task_group *tg;
6402 
6403 	lockdep_assert_rq_held(rq);
6404 
6405 	rcu_read_lock();
6406 	list_for_each_entry_rcu(tg, &task_groups, list) {
6407 		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
6408 		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
6409 
6410 		raw_spin_lock(&cfs_b->lock);
6411 		cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
6412 		raw_spin_unlock(&cfs_b->lock);
6413 	}
6414 	rcu_read_unlock();
6415 }
6416 
6417 /* cpu offline callback */
6418 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
6419 {
6420 	struct task_group *tg;
6421 
6422 	lockdep_assert_rq_held(rq);
6423 
6424 	/*
6425 	 * The rq clock has already been updated in the
6426 	 * set_rq_offline(), so we should skip updating
6427 	 * the rq clock again in unthrottle_cfs_rq().
6428 	 */
6429 	rq_clock_start_loop_update(rq);
6430 
6431 	rcu_read_lock();
6432 	list_for_each_entry_rcu(tg, &task_groups, list) {
6433 		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
6434 
6435 		if (!cfs_rq->runtime_enabled)
6436 			continue;
6437 
6438 		/*
6439 		 * clock_task is not advancing so we just need to make sure
6440 		 * there's some valid quota amount
6441 		 */
6442 		cfs_rq->runtime_remaining = 1;
6443 		/*
6444 		 * An offline rq is schedulable until the CPU is completely disabled
6445 		 * in take_cpu_down(), so we prevent new cfs throttling here.
6446 		 */
6447 		cfs_rq->runtime_enabled = 0;
6448 
6449 		if (cfs_rq_throttled(cfs_rq))
6450 			unthrottle_cfs_rq(cfs_rq);
6451 	}
6452 	rcu_read_unlock();
6453 
6454 	rq_clock_stop_loop_update(rq);
6455 }
6456 
6457 bool cfs_task_bw_constrained(struct task_struct *p)
6458 {
6459 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
6460 
6461 	if (!cfs_bandwidth_used())
6462 		return false;
6463 
6464 	if (cfs_rq->runtime_enabled ||
6465 	    tg_cfs_bandwidth(cfs_rq->tg)->hierarchical_quota != RUNTIME_INF)
6466 		return true;
6467 
6468 	return false;
6469 }
6470 
6471 #ifdef CONFIG_NO_HZ_FULL
6472 /* called from pick_next_task_fair() */
6473 static void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p)
6474 {
6475 	int cpu = cpu_of(rq);
6476 
6477 	if (!sched_feat(HZ_BW) || !cfs_bandwidth_used())
6478 		return;
6479 
6480 	if (!tick_nohz_full_cpu(cpu))
6481 		return;
6482 
6483 	if (rq->nr_running != 1)
6484 		return;
6485 
6486 	/*
6487 	 *  We know there is only one task runnable and we've just picked it. The
6488 	 *  normal enqueue path will have cleared TICK_DEP_BIT_SCHED if we would
6489 	 *  otherwise be able to stop the tick. We just need to check if we are using
6490 	 *  bandwidth control.
6491 	 */
6492 	if (cfs_task_bw_constrained(p))
6493 		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
6494 }
6495 #endif
6496 
6497 #else /* CONFIG_CFS_BANDWIDTH */
6498 
6499 static inline bool cfs_bandwidth_used(void)
6500 {
6501 	return false;
6502 }
6503 
6504 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
6505 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
6506 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
6507 static inline void sync_throttle(struct task_group *tg, int cpu) {}
6508 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
6509 
6510 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
6511 {
6512 	return 0;
6513 }
6514 
6515 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
6516 {
6517 	return 0;
6518 }
6519 
6520 static inline int throttled_lb_pair(struct task_group *tg,
6521 				    int src_cpu, int dest_cpu)
6522 {
6523 	return 0;
6524 }
6525 
6526 #ifdef CONFIG_FAIR_GROUP_SCHED
6527 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent) {}
6528 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
6529 #endif
6530 
6531 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
6532 {
6533 	return NULL;
6534 }
6535 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
6536 static inline void update_runtime_enabled(struct rq *rq) {}
6537 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
6538 #ifdef CONFIG_CGROUP_SCHED
6539 bool cfs_task_bw_constrained(struct task_struct *p)
6540 {
6541 	return false;
6542 }
6543 #endif
6544 #endif /* CONFIG_CFS_BANDWIDTH */
6545 
6546 #if !defined(CONFIG_CFS_BANDWIDTH) || !defined(CONFIG_NO_HZ_FULL)
6547 static inline void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p) {}
6548 #endif
6549 
6550 /**************************************************
6551  * CFS operations on tasks:
6552  */
6553 
6554 #ifdef CONFIG_SCHED_HRTICK
6555 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
6556 {
6557 	struct sched_entity *se = &p->se;
6558 
6559 	SCHED_WARN_ON(task_rq(p) != rq);
6560 
6561 	if (rq->cfs.h_nr_running > 1) {
6562 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
6563 		u64 slice = se->slice;
6564 		s64 delta = slice - ran;
6565 
6566 		if (delta < 0) {
6567 			if (task_current(rq, p))
6568 				resched_curr(rq);
6569 			return;
6570 		}
6571 		hrtick_start(rq, delta);
6572 	}
6573 }
6574 
6575 /*
6576  * called from enqueue/dequeue and updates the hrtick when the
6577  * current task is from our class and nr_running is low enough
6578  * to matter.
6579  */
6580 static void hrtick_update(struct rq *rq)
6581 {
6582 	struct task_struct *curr = rq->curr;
6583 
6584 	if (!hrtick_enabled_fair(rq) || curr->sched_class != &fair_sched_class)
6585 		return;
6586 
6587 	hrtick_start_fair(rq, curr);
6588 }
6589 #else /* !CONFIG_SCHED_HRTICK */
6590 static inline void
6591 hrtick_start_fair(struct rq *rq, struct task_struct *p)
6592 {
6593 }
6594 
6595 static inline void hrtick_update(struct rq *rq)
6596 {
6597 }
6598 #endif
6599 
6600 #ifdef CONFIG_SMP
6601 static inline bool cpu_overutilized(int cpu)
6602 {
6603 	unsigned long  rq_util_min, rq_util_max;
6604 	int overutilized = -1;
6605 
6606 	trace_android_rvh_cpu_overutilized(cpu, &overutilized);
6607 	if (overutilized != -1)
6608 		return overutilized;
6609 
6610 	if (!sched_energy_enabled())
6611 		return false;
6612 
6613 	rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
6614 	rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
6615 
6616 	/* Return true only if the utilization doesn't fit CPU's capacity */
6617 	return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
6618 }
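/*
 * Illustrative sketch (added for exposition; the headroom figure is an
 * assumption based on the usual fits_capacity() definition): with the
 * vendor hook unused, a CPU counts as overutilized once its CFS
 * utilization no longer fits its capacity. If util_fits_cpu() keeps the
 * customary margin of util * 1280 < capacity * 1024, a 1024-capacity CPU
 * running at cpu_util_cfs() == 900 is flagged, since 900 * 1.25 > 1024.
 * The rq-wide uclamp min/max values can shift that verdict either way.
 */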
6619 
6620 static inline void set_rd_overutilized_status(struct root_domain *rd,
6621 					      unsigned int status)
6622 {
6623 	if (!sched_energy_enabled())
6624 		return;
6625 
6626 	WRITE_ONCE(rd->overutilized, status);
6627 	trace_sched_overutilized_tp(rd, !!status);
6628 }
6629 
6630 static inline void check_update_overutilized_status(struct rq *rq)
6631 {
6632 	/*
6633 	 * The overutilized field is used for load-balancing decisions only
6634 	 * if the energy-aware scheduler is in use.
6635 	 */
6636 	if (!sched_energy_enabled())
6637 		return;
6638 
6639 	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu))
6640 		set_rd_overutilized_status(rq->rd, SG_OVERUTILIZED);
6641 }
6642 #else
6643 static inline void check_update_overutilized_status(struct rq *rq) { }
6644 #endif
6645 
6646 /* Runqueue only has SCHED_IDLE tasks enqueued */
6647 static int sched_idle_rq(struct rq *rq)
6648 {
6649 	return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
6650 			rq->nr_running);
6651 }
6652 
6653 #ifdef CONFIG_SMP
6654 static int sched_idle_cpu(int cpu)
6655 {
6656 	return sched_idle_rq(cpu_rq(cpu));
6657 }
6658 #endif
6659 
6660 /*
6661  * The enqueue_task method is called before nr_running is
6662  * increased. Here we update the fair scheduling stats and
6663  * then put the task into the rbtree:
6664  */
6665 static void
6666 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
6667 {
6668 	struct cfs_rq *cfs_rq;
6669 	struct sched_entity *se = &p->se;
6670 	int idle_h_nr_running = task_has_idle_policy(p);
6671 	int task_new = !(flags & ENQUEUE_WAKEUP);
6672 	int should_iowait_boost;
6673 
6674 	/*
6675 	 * The code below (indirectly) updates schedutil which looks at
6676 	 * the cfs_rq utilization to select a frequency.
6677 	 * Let's add the task's estimated utilization to the cfs_rq's
6678 	 * estimated utilization, before we update schedutil.
6679 	 */
6680 	util_est_enqueue(&rq->cfs, p);
6681 
6682 	/*
6683 	 * If in_iowait is set, the code below may not trigger any cpufreq
6684 	 * utilization updates, so do it here explicitly with the IOWAIT flag
6685 	 * passed.
6686 	 */
6687 	should_iowait_boost = p->in_iowait;
6688 	trace_android_rvh_set_iowait(p, rq, &should_iowait_boost);
6689 	if (should_iowait_boost)
6690 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
6691 
6692 	for_each_sched_entity(se) {
6693 		if (se->on_rq)
6694 			break;
6695 		cfs_rq = cfs_rq_of(se);
6696 		enqueue_entity(cfs_rq, se, flags);
6697 
6698 		cfs_rq->h_nr_running++;
6699 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
6700 
6701 		if (cfs_rq_is_idle(cfs_rq))
6702 			idle_h_nr_running = 1;
6703 
6704 		/* end evaluation on encountering a throttled cfs_rq */
6705 		if (cfs_rq_throttled(cfs_rq))
6706 			goto enqueue_throttle;
6707 
6708 		flags = ENQUEUE_WAKEUP;
6709 	}
6710 
6711 	trace_android_rvh_enqueue_task_fair(rq, p, flags);
6712 	for_each_sched_entity(se) {
6713 		cfs_rq = cfs_rq_of(se);
6714 
6715 		update_load_avg(cfs_rq, se, UPDATE_TG);
6716 		se_update_runnable(se);
6717 		update_cfs_group(se);
6718 
6719 		cfs_rq->h_nr_running++;
6720 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
6721 
6722 		if (cfs_rq_is_idle(cfs_rq))
6723 			idle_h_nr_running = 1;
6724 
6725 		/* end evaluation on encountering a throttled cfs_rq */
6726 		if (cfs_rq_throttled(cfs_rq))
6727 			goto enqueue_throttle;
6728 	}
6729 
6730 	/* At this point se is NULL and we are at root level */
6731 	add_nr_running(rq, 1);
6732 
6733 	/*
6734 	 * Since new tasks are assigned an initial util_avg equal to
6735 	 * half of the spare capacity of their CPU, tiny tasks have the
6736 	 * ability to cross the overutilized threshold, which will
6737 	 * result in the load balancer ruining all the task placement
6738 	 * done by EAS. As a way to mitigate that effect, do not account
6739 	 * for the first enqueue operation of new tasks during the
6740 	 * overutilized flag detection.
6741 	 *
6742 	 * A better way of solving this problem would be to wait for
6743 	 * the PELT signals of tasks to converge before taking them
6744 	 * into account, but that is not straightforward to implement,
6745 	 * and the following generally works well enough in practice.
6746 	 */
6747 	if (!task_new)
6748 		check_update_overutilized_status(rq);
6749 
6750 enqueue_throttle:
6751 	assert_list_leaf_cfs_rq(rq);
6752 
6753 	hrtick_update(rq);
6754 }
6755 
6756 static void set_next_buddy(struct sched_entity *se);
6757 
6758 /*
6759  * The dequeue_task method is called before nr_running is
6760  * decreased. We remove the task from the rbtree and
6761  * update the fair scheduling stats:
6762  */
6763 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
6764 {
6765 	struct cfs_rq *cfs_rq;
6766 	struct sched_entity *se = &p->se;
6767 	int task_sleep = flags & DEQUEUE_SLEEP;
6768 	int idle_h_nr_running = task_has_idle_policy(p);
6769 	bool was_sched_idle = sched_idle_rq(rq);
6770 
6771 	util_est_dequeue(&rq->cfs, p);
6772 
6773 	for_each_sched_entity(se) {
6774 		cfs_rq = cfs_rq_of(se);
6775 		dequeue_entity(cfs_rq, se, flags);
6776 
6777 		cfs_rq->h_nr_running--;
6778 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
6779 
6780 		if (cfs_rq_is_idle(cfs_rq))
6781 			idle_h_nr_running = 1;
6782 
6783 		/* end evaluation on encountering a throttled cfs_rq */
6784 		if (cfs_rq_throttled(cfs_rq))
6785 			goto dequeue_throttle;
6786 
6787 		/* Don't dequeue parent if it has other entities besides us */
6788 		if (cfs_rq->load.weight) {
6789 			/* Avoid re-evaluating load for this entity: */
6790 			se = parent_entity(se);
6791 			/*
6792 			 * Bias pick_next to pick a task from this cfs_rq, as
6793 			 * p is sleeping when it is within its sched_slice.
6794 			 */
6795 			if (task_sleep && se && !throttled_hierarchy(cfs_rq))
6796 				set_next_buddy(se);
6797 			break;
6798 		}
6799 		flags |= DEQUEUE_SLEEP;
6800 	}
6801 
6802 	trace_android_rvh_dequeue_task_fair(rq, p, flags);
6803 	for_each_sched_entity(se) {
6804 		cfs_rq = cfs_rq_of(se);
6805 
6806 		update_load_avg(cfs_rq, se, UPDATE_TG);
6807 		se_update_runnable(se);
6808 		update_cfs_group(se);
6809 
6810 		cfs_rq->h_nr_running--;
6811 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
6812 
6813 		if (cfs_rq_is_idle(cfs_rq))
6814 			idle_h_nr_running = 1;
6815 
6816 		/* end evaluation on encountering a throttled cfs_rq */
6817 		if (cfs_rq_throttled(cfs_rq))
6818 			goto dequeue_throttle;
6819 
6820 	}
6821 
6822 	/* At this point se is NULL and we are at root level */
6823 	sub_nr_running(rq, 1);
6824 
6825 	/* balance early to pull high priority tasks */
6826 	if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
6827 		rq->next_balance = jiffies;
6828 
6829 dequeue_throttle:
6830 	util_est_update(&rq->cfs, p, task_sleep);
6831 	hrtick_update(rq);
6832 }
6833 
6834 #ifdef CONFIG_SMP
6835 
6836 /* Working cpumask for: load_balance, load_balance_newidle. */
6837 static DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
6838 static DEFINE_PER_CPU(cpumask_var_t, select_rq_mask);
6839 static DEFINE_PER_CPU(cpumask_var_t, should_we_balance_tmpmask);
6840 
6841 #ifdef CONFIG_NO_HZ_COMMON
6842 
6843 static struct {
6844 	cpumask_var_t idle_cpus_mask;
6845 	atomic_t nr_cpus;
6846 	int has_blocked;		/* Idle CPUs have blocked load */
6847 	int needs_update;		/* Newly idle CPUs need their next_balance collated */
6848 	unsigned long next_balance;     /* in jiffy units */
6849 	unsigned long next_blocked;	/* Next update of blocked load in jiffies */
6850 } nohz ____cacheline_aligned;
6851 
6852 #endif /* CONFIG_NO_HZ_COMMON */
6853 
6854 static unsigned long cpu_load(struct rq *rq)
6855 {
6856 	return cfs_rq_load_avg(&rq->cfs);
6857 }
6858 
6859 /*
6860  * cpu_load_without - compute CPU load without any contributions from *p
6861  * @cpu: the CPU whose load is requested
6862  * @p: the task whose load should be discounted
6863  *
6864  * The load of a CPU is defined by the load of tasks currently enqueued on that
6865  * CPU as well as tasks which are currently sleeping after an execution on that
6866  * CPU.
6867  *
6868  * This method returns the load of the specified CPU by discounting the load of
6869  * the specified task, whenever the task is currently contributing to the CPU
6870  * load.
6871  */
6872 static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p)
6873 {
6874 	struct cfs_rq *cfs_rq;
6875 	unsigned int load;
6876 
6877 	/* Task has no contribution or is new */
6878 	if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
6879 		return cpu_load(rq);
6880 
6881 	cfs_rq = &rq->cfs;
6882 	load = READ_ONCE(cfs_rq->avg.load_avg);
6883 
6884 	/* Discount task's load from CPU's load */
6885 	lsub_positive(&load, task_h_load(p));
6886 
6887 	return load;
6888 }
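/*
 * Illustrative example (added for exposition, not from the source): with
 * a root cfs_rq load_avg of 900 and task_h_load(p) == 300, the load
 * "without" @p is 600. lsub_positive() clamps at zero, so a stale or
 * overestimated task load cannot make the result wrap around: load_avg
 * of 200 with task_h_load(p) == 300 yields 0, not a huge unsigned value.
 */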
6889 
6890 static unsigned long cpu_runnable(struct rq *rq)
6891 {
6892 	return cfs_rq_runnable_avg(&rq->cfs);
6893 }
6894 
6895 static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p)
6896 {
6897 	struct cfs_rq *cfs_rq;
6898 	unsigned int runnable;
6899 
6900 	/* Task has no contribution or is new */
6901 	if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
6902 		return cpu_runnable(rq);
6903 
6904 	cfs_rq = &rq->cfs;
6905 	runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
6906 
6907 	/* Discount task's runnable from CPU's runnable */
6908 	lsub_positive(&runnable, p->se.avg.runnable_avg);
6909 
6910 	return runnable;
6911 }
6912 
6913 static unsigned long capacity_of(int cpu)
6914 {
6915 	return cpu_rq(cpu)->cpu_capacity;
6916 }
6917 
6918 static void record_wakee(struct task_struct *p)
6919 {
6920 	/*
6921 	 * Only decay a single time; tasks that have less than 1 wakeup per
6922 	 * jiffy will not have built up many flips.
6923 	 */
6924 	if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
6925 		current->wakee_flips >>= 1;
6926 		current->wakee_flip_decay_ts = jiffies;
6927 	}
6928 
6929 	if (current->last_wakee != p) {
6930 		current->last_wakee = p;
6931 		current->wakee_flips++;
6932 	}
6933 }
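/*
 * Illustrative example (added for exposition, not from the source):
 * wakee_flips counts how often this waker switches between different
 * wakees, halved at most once per second (HZ jiffies). A dispatcher that
 * woke 40 distinct workers during the last second keeps roughly
 * 40 >> 1 = 20 flips after the decay, while a 1:1 waker/wakee pair that
 * always wakes the same task accumulates no flips at all.
 */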
6934 
6935 /*
6936  * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
6937  *
6938  * A waker of many should wake a different task than the one last awakened
6939  * at a frequency roughly N times higher than one of its wakees.
6940  *
6941  * In order to determine whether we should let the load spread vs consolidating
6942  * to shared cache, we look for a minimum 'flip' frequency of llc_size in one
6943  * partner, and a factor of llc_size higher frequency in the other.
6944  *
6945  * With both conditions met, we can be relatively sure that the relationship is
6946  * non-monogamous, with partner count exceeding socket size.
6947  *
6948  * Waker/wakee being client/server, worker/dispatcher, interrupt source or
6949  * whatever is irrelevant; the spread criterion is that the apparent partner
6950  * count exceeds the socket size.
6951  */
6952 static int wake_wide(struct task_struct *p)
6953 {
6954 	unsigned int master = current->wakee_flips;
6955 	unsigned int slave = p->wakee_flips;
6956 	int factor = __this_cpu_read(sd_llc_size);
6957 
6958 	if (master < slave)
6959 		swap(master, slave);
6960 	if (slave < factor || master < slave * factor)
6961 		return 0;
6962 	return 1;
6963 }
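/*
 * Worked example (illustrative numbers, not from the source): with an
 * LLC of 8 CPUs (factor == 8), wake_wide() reports "wide" only when the
 * less flippy partner has wakee_flips >= 8 and the more flippy one has
 * at least 8x that. A dispatcher with wakee_flips == 100 waking a worker
 * with wakee_flips == 10 passes both tests (10 >= 8 and
 * 100 >= 10 * 8 = 80), so the wakeup is spread instead of being pulled
 * onto the waker's cache domain.
 */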
6964 
6965 /*
6966  * The purpose of wake_affine() is to quickly determine on which CPU we can run
6967  * soonest. For the purpose of speed we only consider the waking and previous
6968  * CPU.
6969  *
6970  * wake_affine_idle() - only considers 'now': it checks whether the waking
6971  *			CPU is cache-affine and is (or will be) idle.
6972  *
6973  * wake_affine_weight() - considers the weight to reflect the average
6974  *			  scheduling latency of the CPUs. This seems to work
6975  *			  for the overloaded case.
6976  */
6977 static int
6978 wake_affine_idle(int this_cpu, int prev_cpu, int sync)
6979 {
6980 	/*
6981 	 * If this_cpu is idle, it implies the wakeup is from interrupt
6982 	 * context. Only allow the move if cache is shared. Otherwise an
6983 	 * interrupt intensive workload could force all tasks onto one
6984 	 * node depending on the IO topology or IRQ affinity settings.
6985 	 *
6986 	 * If the prev_cpu is idle and cache affine then avoid a migration.
6987 	 * There is no guarantee that the cache hot data from an interrupt
6988 	 * is more important than cache hot data on the prev_cpu and from
6989 	 * a cpufreq perspective, it's better to have higher utilisation
6990 	 * on one CPU.
6991 	 */
6992 	if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
6993 		return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
6994 
6995 	if (sync && cpu_rq(this_cpu)->nr_running == 1)
6996 		return this_cpu;
6997 
6998 	if (available_idle_cpu(prev_cpu))
6999 		return prev_cpu;
7000 
7001 	return nr_cpumask_bits;
7002 }
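/*
 * Summary of the decision above (added for exposition): nr_cpumask_bits
 * acts as a "no opinion" sentinel that wake_affine() treats as "fall
 * through to wake_affine_weight()". Concretely:
 *
 *	this_cpu idle and shares cache with prev_cpu:
 *		prev_cpu idle  -> prev_cpu (avoid a migration)
 *		prev_cpu busy  -> this_cpu
 *	sync wakeup and the waker is alone on this_cpu -> this_cpu
 *	prev_cpu idle                                  -> prev_cpu
 *	otherwise                                      -> no opinion
 */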
7003 
7004 static int
7005 wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
7006 		   int this_cpu, int prev_cpu, int sync)
7007 {
7008 	s64 this_eff_load, prev_eff_load;
7009 	unsigned long task_load;
7010 
7011 	this_eff_load = cpu_load(cpu_rq(this_cpu));
7012 
7013 	if (sync) {
7014 		unsigned long current_load = task_h_load(current);
7015 
7016 		if (current_load > this_eff_load)
7017 			return this_cpu;
7018 
7019 		this_eff_load -= current_load;
7020 	}
7021 
7022 	task_load = task_h_load(p);
7023 
7024 	this_eff_load += task_load;
7025 	if (sched_feat(WA_BIAS))
7026 		this_eff_load *= 100;
7027 	this_eff_load *= capacity_of(prev_cpu);
7028 
7029 	prev_eff_load = cpu_load(cpu_rq(prev_cpu));
7030 	prev_eff_load -= task_load;
7031 	if (sched_feat(WA_BIAS))
7032 		prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
7033 	prev_eff_load *= capacity_of(this_cpu);
7034 
7035 	/*
7036 	 * If sync, adjust the weight of prev_eff_load such that if
7037 	 * prev_eff == this_eff that select_idle_sibling() will consider
7038 	 * stacking the wakee on top of the waker if no other CPU is
7039 	 * idle.
7040 	 */
7041 	if (sync)
7042 		prev_eff_load += 1;
7043 
7044 	return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
7045 }
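/*
 * Worked example (made-up numbers, added for exposition): equal CPU
 * capacities of 1024, WA_BIAS enabled, sd->imbalance_pct == 117,
 * task_h_load(p) == 200, a non-sync wakeup, this_cpu load 300 and
 * prev_cpu load 500:
 *
 *	this_eff_load = (300 + 200) * 100 * 1024             = 51200000
 *	prev_eff_load = (500 - 200) * (100 + (117-100)/2) * 1024
 *	              = 300 * 108 * 1024                     = 33177600
 *
 * this_eff_load is not smaller, so the verdict is "no opinion"
 * (nr_cpumask_bits) and wake_affine() falls back to prev_cpu: the
 * imbalance_pct bias deliberately favors leaving the task where its
 * cache is warm.
 */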
7046 
7047 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
7048 		       int this_cpu, int prev_cpu, int sync)
7049 {
7050 	int target = nr_cpumask_bits;
7051 
7052 	if (sched_feat(WA_IDLE))
7053 		target = wake_affine_idle(this_cpu, prev_cpu, sync);
7054 
7055 	if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
7056 		target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
7057 
7058 	schedstat_inc(p->stats.nr_wakeups_affine_attempts);
7059 	if (target != this_cpu)
7060 		return prev_cpu;
7061 
7062 	schedstat_inc(sd->ttwu_move_affine);
7063 	schedstat_inc(p->stats.nr_wakeups_affine);
7064 	return target;
7065 }
7066 
7067 static struct sched_group *
7068 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
7069 
7070 /*
7071  * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group.
7072  */
7073 static int
7074 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
7075 {
7076 	unsigned long load, min_load = ULONG_MAX;
7077 	unsigned int min_exit_latency = UINT_MAX;
7078 	u64 latest_idle_timestamp = 0;
7079 	int least_loaded_cpu = this_cpu;
7080 	int shallowest_idle_cpu = -1;
7081 	int i;
7082 
7083 	/* Check if we have any choice: */
7084 	if (group->group_weight == 1)
7085 		return cpumask_first(sched_group_span(group));
7086 
7087 	/* Traverse only the allowed CPUs */
7088 	for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
7089 		struct rq *rq = cpu_rq(i);
7090 
7091 		if (!sched_core_cookie_match(rq, p))
7092 			continue;
7093 
7094 		if (sched_idle_cpu(i))
7095 			return i;
7096 
7097 		if (available_idle_cpu(i)) {
7098 			struct cpuidle_state *idle = idle_get_state(rq);
7099 			if (idle && idle->exit_latency < min_exit_latency) {
7100 				/*
7101 				 * We give priority to a CPU whose idle state
7102 				 * has the smallest exit latency irrespective
7103 				 * of any idle timestamp.
7104 				 */
7105 				min_exit_latency = idle->exit_latency;
7106 				latest_idle_timestamp = rq->idle_stamp;
7107 				shallowest_idle_cpu = i;
7108 			} else if ((!idle || idle->exit_latency == min_exit_latency) &&
7109 				   rq->idle_stamp > latest_idle_timestamp) {
7110 				/*
7111 				 * If equal or no active idle state, then
7112 				 * the most recently idled CPU might have
7113 				 * a warmer cache.
7114 				 */
7115 				latest_idle_timestamp = rq->idle_stamp;
7116 				shallowest_idle_cpu = i;
7117 			}
7118 		} else if (shallowest_idle_cpu == -1) {
7119 			load = cpu_load(cpu_rq(i));
7120 			if (load < min_load) {
7121 				min_load = load;
7122 				least_loaded_cpu = i;
7123 			}
7124 		}
7125 	}
7126 
7127 	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
7128 }
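/*
 * Summary of the scan above (added for exposition): candidates rank as
 *
 *	1. any SCHED_IDLE-only CPU, returned immediately;
 *	2. the idle CPU whose idle state has the smallest exit_latency,
 *	   ties broken by the most recent rq->idle_stamp (warmer cache);
 *	3. failing any idle CPU, the least loaded one by cpu_load().
 *
 * For example, between CPU2 idle in a deep state (exit_latency 400us)
 * and CPU5 idle in a shallow state (exit_latency 10us), CPU5 wins
 * regardless of which CPU went idle more recently.
 */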
7129 
7130 static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
7131 				  int cpu, int prev_cpu, int sd_flag)
7132 {
7133 	int new_cpu = cpu;
7134 
7135 	if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
7136 		return prev_cpu;
7137 
7138 	/*
7139 	 * We need task's util for cpu_util_without, sync it up to
7140 	 * prev_cpu's last_update_time.
7141 	 */
7142 	if (!(sd_flag & SD_BALANCE_FORK))
7143 		sync_entity_load_avg(&p->se);
7144 
7145 	while (sd) {
7146 		struct sched_group *group;
7147 		struct sched_domain *tmp;
7148 		int weight;
7149 
7150 		if (!(sd->flags & sd_flag)) {
7151 			sd = sd->child;
7152 			continue;
7153 		}
7154 
7155 		group = find_idlest_group(sd, p, cpu);
7156 		if (!group) {
7157 			sd = sd->child;
7158 			continue;
7159 		}
7160 
7161 		new_cpu = find_idlest_group_cpu(group, p, cpu);
7162 		if (new_cpu == cpu) {
7163 			/* Now try balancing at a lower domain level of 'cpu': */
7164 			sd = sd->child;
7165 			continue;
7166 		}
7167 
7168 		/* Now try balancing at a lower domain level of 'new_cpu': */
7169 		cpu = new_cpu;
7170 		weight = sd->span_weight;
7171 		sd = NULL;
7172 		for_each_domain(cpu, tmp) {
7173 			if (weight <= tmp->span_weight)
7174 				break;
7175 			if (tmp->flags & sd_flag)
7176 				sd = tmp;
7177 		}
7178 	}
7179 
7180 	return new_cpu;
7181 }
7182 
7183 static inline int __select_idle_cpu(int cpu, struct task_struct *p)
7184 {
7185 	if ((available_idle_cpu(cpu) || sched_idle_cpu(cpu)) &&
7186 	    sched_cpu_cookie_match(cpu_rq(cpu), p))
7187 		return cpu;
7188 
7189 	return -1;
7190 }
7191 
7192 #ifdef CONFIG_SCHED_SMT
7193 DEFINE_STATIC_KEY_FALSE(sched_smt_present);
7194 EXPORT_SYMBOL_GPL(sched_smt_present);
7195 
7196 static inline void set_idle_cores(int cpu, int val)
7197 {
7198 	struct sched_domain_shared *sds;
7199 
7200 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
7201 	if (sds)
7202 		WRITE_ONCE(sds->has_idle_cores, val);
7203 }
7204 
7205 static inline bool test_idle_cores(int cpu)
7206 {
7207 	struct sched_domain_shared *sds;
7208 
7209 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
7210 	if (sds)
7211 		return READ_ONCE(sds->has_idle_cores);
7212 
7213 	return false;
7214 }
7215 
7216 /*
7217  * Scans the local SMT mask to see if the entire core is idle, and records this
7218  * information in sd_llc_shared->has_idle_cores.
7219  *
7220  * Since SMT siblings share all cache levels, inspecting this limited remote
7221  * state should be fairly cheap.
7222  */
7223 void __update_idle_core(struct rq *rq)
7224 {
7225 	int core = cpu_of(rq);
7226 	int cpu;
7227 
7228 	rcu_read_lock();
7229 	if (test_idle_cores(core))
7230 		goto unlock;
7231 
7232 	for_each_cpu(cpu, cpu_smt_mask(core)) {
7233 		if (cpu == core)
7234 			continue;
7235 
7236 		if (!available_idle_cpu(cpu))
7237 			goto unlock;
7238 	}
7239 
7240 	set_idle_cores(core, 1);
7241 unlock:
7242 	rcu_read_unlock();
7243 }
7244 
7245 /*
7246  * Scan the entire LLC domain for idle cores; this dynamically switches off if
7247  * there are no idle cores left in the system; tracked through
7248  * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
7249  */
7250 static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
7251 {
7252 	bool idle = true;
7253 	int cpu;
7254 
7255 	for_each_cpu(cpu, cpu_smt_mask(core)) {
7256 		if (!available_idle_cpu(cpu)) {
7257 			idle = false;
7258 			if (*idle_cpu == -1) {
7259 				if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, cpus)) {
7260 					*idle_cpu = cpu;
7261 					break;
7262 				}
7263 				continue;
7264 			}
7265 			break;
7266 		}
7267 		if (*idle_cpu == -1 && cpumask_test_cpu(cpu, cpus))
7268 			*idle_cpu = cpu;
7269 	}
7270 
7271 	if (idle)
7272 		return core;
7273 
7274 	cpumask_andnot(cpus, cpus, cpu_smt_mask(core));
7275 	return -1;
7276 }
7277 
7278 /*
7279  * Scan the local SMT mask for idle CPUs.
7280  */
7281 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
7282 {
7283 	int cpu;
7284 
7285 	for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
7286 		if (cpu == target)
7287 			continue;
7288 		/*
7289 		 * Check if the CPU is in the LLC scheduling domain of @target.
7290 		 * Due to isolcpus, there is no guarantee that all the siblings are in the domain.
7291 		 */
7292 		if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
7293 			continue;
7294 		if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
7295 			return cpu;
7296 	}
7297 
7298 	return -1;
7299 }
7300 
7301 #else /* CONFIG_SCHED_SMT */
7302 
7303 static inline void set_idle_cores(int cpu, int val)
7304 {
7305 }
7306 
7307 static inline bool test_idle_cores(int cpu)
7308 {
7309 	return false;
7310 }
7311 
7312 static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
7313 {
7314 	return __select_idle_cpu(core, p);
7315 }
7316 
7317 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
7318 {
7319 	return -1;
7320 }
7321 
7322 #endif /* CONFIG_SCHED_SMT */
7323 
7324 /*
7325  * Scan the LLC domain for idle CPUs; this is dynamically regulated by
7326  * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
7327  * average idle time for this rq (as found in rq->avg_idle).
7328  */
7329 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target)
7330 {
7331 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
7332 	int i, cpu, idle_cpu = -1, nr = INT_MAX;
7333 	struct sched_domain_shared *sd_share;
7334 	struct rq *this_rq = this_rq();
7335 	int this = smp_processor_id();
7336 	struct sched_domain *this_sd = NULL;
7337 	u64 time = 0;
7338 
7339 	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
7340 
7341 	if (sched_feat(SIS_PROP) && !has_idle_core) {
7342 		u64 avg_cost, avg_idle, span_avg;
7343 		unsigned long now = jiffies;
7344 
7345 		this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
7346 		if (!this_sd)
7347 			return -1;
7348 
7349 		/*
7350 		 * If we're busy, the assumption that the last idle period
7351 		 * predicts the future is flawed; age away the remaining
7352 		 * predicted idle time.
7353 		 */
7354 		if (unlikely(this_rq->wake_stamp < now)) {
7355 			while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) {
7356 				this_rq->wake_stamp++;
7357 				this_rq->wake_avg_idle >>= 1;
7358 			}
7359 		}
7360 
7361 		avg_idle = this_rq->wake_avg_idle;
7362 		avg_cost = this_sd->avg_scan_cost + 1;
7363 
7364 		span_avg = sd->span_weight * avg_idle;
7365 		if (span_avg > 4*avg_cost)
7366 			nr = div_u64(span_avg, avg_cost);
7367 		else
7368 			nr = 4;
7369 
7370 		time = cpu_clock(this);
7371 	}
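	/*
	 * Worked example (illustrative numbers, added for exposition):
	 * with sd->span_weight == 16, wake_avg_idle == 2000ns and
	 * avg_scan_cost + 1 == 500ns:
	 *
	 *	span_avg = 16 * 2000 = 32000 > 4 * 500
	 *	nr       = 32000 / 500 = 64
	 *
	 * so the scan may visit up to 64 candidates. Were avg_idle only
	 * 100ns, span_avg (1600) would not exceed 4 * avg_cost (2000)
	 * and the scan depth would be clamped to the minimum of 4.
	 */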
7372 
7373 	if (sched_feat(SIS_UTIL)) {
7374 		sd_share = rcu_dereference(per_cpu(sd_llc_shared, target));
7375 		if (sd_share) {
7376 			/* because !--nr is the condition to stop scan */
7377 			nr = READ_ONCE(sd_share->nr_idle_scan) + 1;
7378 			/* overloaded LLC is unlikely to have idle cpu/core */
7379 			if (nr == 1)
7380 				return -1;
7381 		}
7382 	}
7383 
7384 	for_each_cpu_wrap(cpu, cpus, target + 1) {
7385 		if (has_idle_core) {
7386 			i = select_idle_core(p, cpu, cpus, &idle_cpu);
7387 			if ((unsigned int)i < nr_cpumask_bits)
7388 				return i;
7389 
7390 		} else {
7391 			if (!--nr)
7392 				return -1;
7393 			idle_cpu = __select_idle_cpu(cpu, p);
7394 			if ((unsigned int)idle_cpu < nr_cpumask_bits)
7395 				break;
7396 		}
7397 	}
7398 
7399 	if (has_idle_core)
7400 		set_idle_cores(target, false);
7401 
7402 	if (sched_feat(SIS_PROP) && this_sd && !has_idle_core) {
7403 		time = cpu_clock(this) - time;
7404 
7405 		/*
7406 		 * Account for the scan cost of wakeups against the average
7407 		 * idle time.
7408 		 */
7409 		this_rq->wake_avg_idle -= min(this_rq->wake_avg_idle, time);
7410 
7411 		update_avg(&this_sd->avg_scan_cost, time);
7412 	}
7413 
7414 	return idle_cpu;
7415 }
7416 
7417 /*
7418  * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which
7419  * the task fits. If no CPU is big enough, but there are idle ones, try to
7420  * maximize capacity.
7421  */
7422 static int
7423 select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
7424 {
7425 	unsigned long task_util, util_min, util_max, best_cap = 0;
7426 	int fits, best_fits = 0;
7427 	int cpu, best_cpu = -1;
7428 	struct cpumask *cpus;
7429 
7430 	cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
7431 	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
7432 
7433 	task_util = task_util_est(p);
7434 	util_min = uclamp_eff_value(p, UCLAMP_MIN);
7435 	util_max = uclamp_eff_value(p, UCLAMP_MAX);
7436 
7437 	for_each_cpu_wrap(cpu, cpus, target) {
7438 		unsigned long cpu_cap = capacity_of(cpu);
7439 
7440 		if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
7441 			continue;
7442 
7443 		fits = util_fits_cpu(task_util, util_min, util_max, cpu);
7444 
7445 		/* This CPU fits with all requirements */
7446 		if (fits > 0)
7447 			return cpu;
7448 		/*
7449 		 * Only the min performance hint (i.e. uclamp_min) doesn't fit.
7450 		 * Look for the CPU with best capacity.
7451 		 */
7452 		else if (fits < 0)
7453 			cpu_cap = capacity_orig_of(cpu) - thermal_load_avg(cpu_rq(cpu));
7454 
7455 		/*
7456 		 * First, select CPU which fits better (-1 being better than 0).
7457 		 * Then, select the one with best capacity at same level.
7458 		 */
7459 		if ((fits < best_fits) ||
7460 		    ((fits == best_fits) && (cpu_cap > best_cap))) {
7461 			best_cap = cpu_cap;
7462 			best_cpu = cpu;
7463 			best_fits = fits;
7464 		}
7465 	}
7466 
7467 	return best_cpu;
7468 }
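/*
 * Summary of the fits logic above (added for exposition): the value from
 * util_fits_cpu() is a three-way verdict:
 *
 *	 1  fits capacity and both uclamp hints -> returned immediately
 *	 0  the utilization itself doesn't fit  -> kept only as a last-
 *	    resort capacity-maximizing fallback
 *	-1  only the uclamp_min hint is missed  -> preferred over 0,
 *	    ranked by thermally-adjusted capacity
 *
 * E.g. for a task with uclamp_min == 900 when the biggest idle CPU can
 * currently deliver only 800 of capacity, every candidate reports -1 and
 * the 800-capacity CPU is returned as the least-bad choice.
 */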
7469 
7470 static inline bool asym_fits_cpu(unsigned long util,
7471 				 unsigned long util_min,
7472 				 unsigned long util_max,
7473 				 int cpu)
7474 {
7475 	if (sched_asym_cpucap_active())
7476 		/*
7477 		 * Return true only if the cpu fully fits the task requirements
7478 		 * which include the utilization and the performance hints.
7479 		 */
7480 		return (util_fits_cpu(util, util_min, util_max, cpu) > 0);
7481 
7482 	return true;
7483 }
7484 
7485 /*
7486  * Try and locate an idle core/thread in the LLC cache domain.
7487  */
7488 static int select_idle_sibling(struct task_struct *p, int prev, int target)
7489 {
7490 	bool has_idle_core = false;
7491 	struct sched_domain *sd;
7492 	unsigned long task_util, util_min, util_max;
7493 	int i, recent_used_cpu;
7494 
7495 	/*
7496 	 * On an asymmetric system, update the task utilization because we will
7497 	 * check that the task fits the CPU's capacity.
7498 	 */
7499 	if (sched_asym_cpucap_active()) {
7500 		sync_entity_load_avg(&p->se);
7501 		task_util = task_util_est(p);
7502 		util_min = uclamp_eff_value(p, UCLAMP_MIN);
7503 		util_max = uclamp_eff_value(p, UCLAMP_MAX);
7504 	}
7505 
7506 	/*
7507 	 * per-cpu select_rq_mask usage
7508 	 */
7509 	lockdep_assert_irqs_disabled();
7510 
7511 	if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
7512 	    asym_fits_cpu(task_util, util_min, util_max, target))
7513 		return target;
7514 
7515 	/*
7516 	 * If the previous CPU is cache affine and idle, don't be stupid:
7517 	 */
7518 	if (prev != target && cpus_share_cache(prev, target) &&
7519 	    (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
7520 	    asym_fits_cpu(task_util, util_min, util_max, prev))
7521 		return prev;
7522 
7523 	/*
7524 	 * Allow a per-cpu kthread to stack with the wakee if the
7525 	 * kworker thread and the task's previous CPU are the same.
7526 	 * The assumption is that the wakee queued work for the
7527 	 * per-cpu kthread that is now complete and the wakeup is
7528 	 * essentially a sync wakeup. An obvious example of this
7529 	 * pattern is IO completions.
7530 	 */
7531 	if (is_per_cpu_kthread(current) &&
7532 	    in_task() &&
7533 	    prev == smp_processor_id() &&
7534 	    this_rq()->nr_running <= 1 &&
7535 	    asym_fits_cpu(task_util, util_min, util_max, prev)) {
7536 		return prev;
7537 	}
7538 
7539 	/* Check a recently used CPU as a potential idle candidate: */
7540 	recent_used_cpu = p->recent_used_cpu;
7541 	p->recent_used_cpu = prev;
7542 	if (recent_used_cpu != prev &&
7543 	    recent_used_cpu != target &&
7544 	    cpus_share_cache(recent_used_cpu, target) &&
7545 	    (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
7546 	    cpumask_test_cpu(recent_used_cpu, p->cpus_ptr) &&
7547 	    asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) {
7548 		return recent_used_cpu;
7549 	}
7550 
7551 	/*
7552 	 * For asymmetric CPU capacity systems, our domain of interest is
7553 	 * sd_asym_cpucapacity rather than sd_llc.
7554 	 */
7555 	if (sched_asym_cpucap_active()) {
7556 		sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
7557 		/*
7558 		 * On an asymmetric CPU capacity system where an exclusive
7559 		 * cpuset defines a symmetric island (i.e. one unique
7560 		 * capacity_orig value through the cpuset), the key will be set
7561 		 * but the CPUs within that cpuset will not have a domain with
7562 		 * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric
7563 		 * capacity path.
7564 		 */
7565 		if (sd) {
7566 			i = select_idle_capacity(p, sd, target);
7567 			return ((unsigned)i < nr_cpumask_bits) ? i : target;
7568 		}
7569 	}
7570 
7571 	sd = rcu_dereference(per_cpu(sd_llc, target));
7572 	if (!sd)
7573 		return target;
7574 
7575 	if (sched_smt_active()) {
7576 		has_idle_core = test_idle_cores(target);
7577 
7578 		if (!has_idle_core && cpus_share_cache(prev, target)) {
7579 			i = select_idle_smt(p, sd, prev);
7580 			if ((unsigned int)i < nr_cpumask_bits)
7581 				return i;
7582 		}
7583 	}
7584 
7585 	i = select_idle_cpu(p, sd, has_idle_core, target);
7586 	if ((unsigned)i < nr_cpumask_bits)
7587 		return i;
7588 
7589 	return target;
7590 }
7591 
7592 /**
7593  * cpu_util() - Estimates the amount of CPU capacity used by CFS tasks.
7594  * @cpu: the CPU to get the utilization for
7595  * @p: task for which the CPU utilization should be predicted or NULL
7596  * @dst_cpu: CPU @p migrates to, -1 if @p moves from @cpu or @p == NULL
7597  * @boost: 1 to enable boosting, otherwise 0
7598  *
7599  * The unit of the return value must be the same as the one of CPU capacity
7600  * so that CPU utilization can be compared with CPU capacity.
7601  *
7602  * CPU utilization is the sum of running time of runnable tasks plus the
7603  * recent utilization of currently non-runnable tasks on that CPU.
7604  * It represents the amount of CPU capacity currently used by CFS tasks in
7605  * the range [0..max CPU capacity] with max CPU capacity being the CPU
7606  * capacity at f_max.
7607  *
7608  * The estimated CPU utilization is defined as the maximum between CPU
7609  * utilization and sum of the estimated utilization of the currently
7610  * runnable tasks on that CPU. It preserves a utilization "snapshot" of
7611  * previously-executed tasks, which helps better deduce how busy a CPU will
7612  * be when a long-sleeping task wakes up. The contribution to CPU utilization
7613  * of such a task would be significantly decayed at this point of time.
7614  *
7615  * Boosted CPU utilization is defined as max(CPU runnable, CPU utilization).
7616  * CPU contention for CFS tasks can be detected by CPU runnable > CPU
7617  * utilization. Boosting is implemented in cpu_util() so that internal
7618  * users (e.g. EAS) can use it next to external users (e.g. schedutil),
7619  * latter via cpu_util_cfs_boost().
7620  *
7621  * CPU utilization can be higher than the current CPU capacity
7622  * (f_curr/f_max * max CPU capacity) or even the max CPU capacity because
7623  * of rounding errors as well as task migrations or wakeups of new tasks.
7624  * CPU utilization has to be capped to fit into the [0..max CPU capacity]
7625  * range. Otherwise a group of CPUs (CPU0 util = 121% + CPU1 util = 80%)
7626  * could be seen as over-utilized even though CPU1 has 20% of spare CPU
7627  * capacity. CPU utilization is allowed to overshoot current CPU capacity
7628  * though since this is useful for predicting the CPU capacity required
7629  * after task migrations (scheduler-driven DVFS).
7630  *
7631  * Return: (Boosted) (estimated) utilization for the specified CPU.
7632  */
7633 static unsigned long
7634 cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost)
7635 {
7636 	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
7637 	unsigned long util = READ_ONCE(cfs_rq->avg.util_avg);
7638 	unsigned long runnable;
7639 
7640 	if (boost) {
7641 		runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
7642 		util = max(util, runnable);
7643 	}
7644 
7645 	/*
7646 	 * If @dst_cpu is -1 or @p migrates from @cpu to @dst_cpu remove its
7647 	 * contribution. If @p migrates from another CPU to @cpu add its
7648 	 * contribution. In all the other cases @cpu is not impacted by the
7649 	 * migration so its util_avg is already correct.
7650 	 */
7651 	if (p && task_cpu(p) == cpu && dst_cpu != cpu)
7652 		lsub_positive(&util, task_util(p));
7653 	else if (p && task_cpu(p) != cpu && dst_cpu == cpu)
7654 		util += task_util(p);
7655 
7656 	if (sched_feat(UTIL_EST)) {
7657 		unsigned long util_est;
7658 
7659 		util_est = READ_ONCE(cfs_rq->avg.util_est);
7660 
7661 		/*
7662 		 * During wake-up @p isn't enqueued yet and doesn't contribute
7663 		 * to any cpu_rq(cpu)->cfs.avg.util_est.
7664 		 * If @dst_cpu == @cpu add it to "simulate" cpu_util after @p
7665 		 * has been enqueued.
7666 		 *
7667 		 * During exec (@dst_cpu = -1) @p is enqueued and does
7668 		 * contribute to cpu_rq(cpu)->cfs.util_est.
7669 		 * Remove it to "simulate" cpu_util without @p's contribution.
7670 		 *
7671 		 * Despite the task_on_rq_queued(@p) check there is still a
7672 		 * small window for a possible race when an exec
7673 		 * select_task_rq_fair() races with LB's detach_task().
7674 		 *
7675 		 *   detach_task()
7676 		 *     deactivate_task()
7677 		 *       p->on_rq = TASK_ON_RQ_MIGRATING;
7678 		 *       -------------------------------- A
7679 		 *       dequeue_task()                    \
7680 		 *         dequeue_task_fair()              + Race Time
7681 		 *           util_est_dequeue()            /
7682 		 *       -------------------------------- B
7683 		 *
7684 		 * The additional check "current == p" is required to further
7685 		 * reduce the race window.
7686 		 */
7687 		if (dst_cpu == cpu)
7688 			util_est += _task_util_est(p);
7689 		else if (p && unlikely(task_on_rq_queued(p) || current == p))
7690 			lsub_positive(&util_est, _task_util_est(p));
7691 
7692 		util = max(util, util_est);
7693 	}
7694 
7695 	return min(util, capacity_orig_of(cpu));
7696 }
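/*
 * Worked example (illustrative numbers, added for exposition): a CPU
 * with util_avg == 300, runnable_avg == 450, util_est == 512 and
 * capacity_orig == 1024, evaluated with @p == NULL and @boost == 1:
 *
 *	util = max(300, 450) = 450	(runnable boost)
 *	util = max(450, 512) = 512	(UTIL_EST snapshot)
 *	return min(512, 1024) = 512
 *
 * The max with util_est prevents a transiently decayed util_avg from
 * underestimating how busy the CPU will be once its tasks run again.
 */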
7697 
7698 unsigned long cpu_util_cfs(int cpu)
7699 {
7700 	return cpu_util(cpu, NULL, -1, 0);
7701 }
7702 
7703 unsigned long cpu_util_cfs_boost(int cpu)
7704 {
7705 	unsigned long util = INT_MAX;
7706 
7707 	trace_android_rvh_cpu_util_cfs_boost(cpu, &util);
7708 	if (util != INT_MAX)
7709 		return util;
7710 
7711 	return cpu_util(cpu, NULL, -1, 1);
7712 }
7713 
7714 /*
7715  * cpu_util_without: compute cpu utilization without any contributions from *p
7716  * @cpu: the CPU whose utilization is requested
7717  * @p: the task whose utilization should be discounted
7718  *
7719  * The utilization of a CPU is defined by the utilization of tasks currently
7720  * enqueued on that CPU as well as tasks which are currently sleeping after an
7721  * execution on that CPU.
7722  *
7723  * This method returns the utilization of the specified CPU by discounting the
7724  * utilization of the specified task, whenever the task is currently
7725  * contributing to the CPU utilization.
7726  */
7727 static unsigned long cpu_util_without(int cpu, struct task_struct *p)
7728 {
7729 	/* Task has no contribution or is new */
7730 	if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
7731 		p = NULL;
7732 
7733 	return cpu_util(cpu, p, -1, 0);
7734 }
7735 
7736 /*
7737  * energy_env - Utilization landscape for energy estimation.
7738  * @task_busy_time: Utilization contribution by the task for which we test the
7739  *                  placement. Given by eenv_task_busy_time().
7740  * @pd_busy_time:   Utilization of the whole perf domain without the task
7741  *                  contribution. Given by eenv_pd_busy_time().
7742  * @cpu_cap:        Maximum CPU capacity for the perf domain.
7743  * @pd_cap:         Entire perf domain capacity. (pd->nr_cpus * cpu_cap).
7744  */
7745 struct energy_env {
7746 	unsigned long task_busy_time;
7747 	unsigned long pd_busy_time;
7748 	unsigned long cpu_cap;
7749 	unsigned long pd_cap;
7750 };
7751 
7752 /*
7753  * Compute the task busy time for compute_energy(). This time cannot be
7754  * injected directly into effective_cpu_util() because of the IRQ scaling.
7755  * The latter only makes sense with the most recent CPUs where the task has
7756  * run.
7757  */
7758 static inline void eenv_task_busy_time(struct energy_env *eenv,
7759 				       struct task_struct *p, int prev_cpu)
7760 {
7761 	unsigned long busy_time, max_cap = arch_scale_cpu_capacity(prev_cpu);
7762 	unsigned long irq = cpu_util_irq(cpu_rq(prev_cpu));
7763 
7764 	if (unlikely(irq >= max_cap))
7765 		busy_time = max_cap;
7766 	else
7767 		busy_time = scale_irq_capacity(task_util_est(p), irq, max_cap);
7768 
7769 	eenv->task_busy_time = busy_time;
7770 }
7771 
7772 /*
7773  * Compute the perf_domain (PD) busy time for compute_energy(). It is based
7774  * on the utilization of each CPU in @pd_cpus, but does not take clamping
7775  * into account: the ratio (utilization / cpu_capacity) is already enough to
7776  * scale the EM reported power consumption at the (eventually clamped)
7777  * cpu_capacity.
7778  *
7779  * The contribution of the task @p for which we want to estimate the
7780  * energy cost is removed (by cpu_util()) and must be calculated
7781  * separately (see eenv_task_busy_time). This ensures:
7782  *
7783  *   - A stable PD utilization, no matter which CPU of that PD we want to place
7784  *     the task on.
7785  *
7786  *   - A fair comparison between CPUs as the task contribution (task_util())
7787  *     will always be the same no matter which CPU utilization we rely on
7788  *     (util_avg or util_est).
7789  *
7790  * Set @eenv busy time for the PD that spans @pd_cpus. This busy time can't
7791  * exceed @eenv->pd_cap.
7792  */
7793 static inline void eenv_pd_busy_time(struct energy_env *eenv,
7794 				     struct cpumask *pd_cpus,
7795 				     struct task_struct *p)
7796 {
7797 	unsigned long busy_time = 0;
7798 	int cpu;
7799 
7800 	for_each_cpu(cpu, pd_cpus) {
7801 		unsigned long util = cpu_util(cpu, p, -1, 0);
7802 
7803 		busy_time += effective_cpu_util(cpu, util, ENERGY_UTIL, NULL);
7804 	}
7805 
7806 	eenv->pd_busy_time = min(eenv->pd_cap, busy_time);
7807 }
7808 
7809 /*
7810  * Compute the maximum utilization for compute_energy() when the task @p
7811  * is placed on the cpu @dst_cpu.
7812  *
7813  * Returns the maximum utilization among @eenv->cpus. This utilization can't
7814  * exceed @eenv->cpu_cap.
7815  */
7816 static inline unsigned long
7817 eenv_pd_max_util(struct energy_env *eenv, struct cpumask *pd_cpus,
7818 		 struct task_struct *p, int dst_cpu)
7819 {
7820 	unsigned long max_util = 0;
7821 	int cpu;
7822 
7823 	for_each_cpu(cpu, pd_cpus) {
7824 		struct task_struct *tsk = (cpu == dst_cpu) ? p : NULL;
7825 		unsigned long util = cpu_util(cpu, p, dst_cpu, 1);
7826 		unsigned long eff_util;
7827 
7828 		/*
7829 		 * Performance domain frequency: utilization clamping
7830 		 * must be considered since it affects the selection
7831 		 * of the performance domain frequency.
7832 		 * NOTE: in case RT tasks are running, by default the
7833 		 * FREQUENCY_UTIL's utilization can be max OPP.
7834 		 */
7835 		eff_util = effective_cpu_util(cpu, util, FREQUENCY_UTIL, tsk);
7836 		max_util = max(max_util, eff_util);
7837 	}
7838 
7839 	return min(max_util, eenv->cpu_cap);
7840 }
7841 
7842 /*
7843  * compute_energy(): Use the Energy Model to estimate the energy that @pd would
7844  * consume for a given utilization landscape @eenv. When @dst_cpu < 0, the task
7845  * contribution is ignored.
7846  */
7847 static inline unsigned long
7848 compute_energy(struct energy_env *eenv, struct perf_domain *pd,
7849 	       struct cpumask *pd_cpus, struct task_struct *p, int dst_cpu)
7850 {
7851 	unsigned long max_util = eenv_pd_max_util(eenv, pd_cpus, p, dst_cpu);
7852 	unsigned long busy_time = eenv->pd_busy_time;
7853 
7854 	if (dst_cpu >= 0)
7855 		busy_time = min(eenv->pd_cap, busy_time + eenv->task_busy_time);
7856 
7857 	return em_cpu_energy(pd->em_pd, max_util, busy_time, eenv->cpu_cap);
7858 }
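/*
 * Usage sketch (added for exposition, mirroring the calls made in
 * find_energy_efficient_cpu() below): the result is always consumed as a
 * difference,
 *
 *	base_energy = compute_energy(&eenv, pd, cpus, p, -1);
 *	cur_delta   = compute_energy(&eenv, pd, cpus, p, cpu) - base_energy;
 *
 * i.e. "how much extra energy would this PD draw if @p ran on @cpu".
 * Placing the task raises both the frequency request (via max_util) and
 * the busy time, and em_cpu_energy() maps those onto the Energy Model's
 * power/frequency table.
 */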
7859 
7860 /*
7861  * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the
7862  * waking task. find_energy_efficient_cpu() looks for the CPU with maximum
7863  * spare capacity in each performance domain and uses it as a potential
7864  * candidate to execute the task. Then, it uses the Energy Model to figure
7865  * out which of the CPU candidates is the most energy-efficient.
7866  *
7867  * The rationale for this heuristic is as follows. In a performance domain,
7868  * all the most energy efficient CPU candidates (according to the Energy
7869  * Model) are those for which we'll request a low frequency. When there are
7870  * several CPUs for which the frequency request will be the same, we don't
7871  * have enough data to break the tie between them, because the Energy Model
7872  * only includes active power costs. With this model, if we assume that
7873  * frequency requests follow utilization (e.g. using schedutil), the CPU with
7874  * the maximum spare capacity in a performance domain is guaranteed to be among
7875  * the best candidates of the performance domain.
7876  *
7877  * In practice, it could be preferable from an energy standpoint to pack
7878  * small tasks on a CPU in order to let other CPUs go in deeper idle states,
7879  * but that could also hurt our chances to go cluster idle, and we have no
7880  * ways to tell with the current Energy Model if this is actually a good
7881  * idea or not. So, find_energy_efficient_cpu() basically favors
7882  * cluster-packing, and spreading inside a cluster. That should at least be
7883  * a good thing for latency, and this is consistent with the idea that most
7884  * of the energy savings of EAS come from the asymmetry of the system, and
7885  * not so much from breaking the tie between identical CPUs. That's also the
7886  * reason why EAS is enabled in the topology code only for systems where
7887  * SD_ASYM_CPUCAPACITY is set.
7888  *
7889  * NOTE: Forkees are not accepted in the energy-aware wake-up path because
7890  * they don't have any useful utilization data yet and it's not possible to
7891  * forecast their impact on energy consumption. Consequently, they will be
7892  * placed by find_idlest_cpu() on the least loaded CPU, which might turn out
7893  * to be energy-inefficient in some use-cases. The alternative would be to
7894  * bias new tasks towards specific types of CPUs first, or to try to infer
7895  * their util_avg from the parent task, but those heuristics could hurt
7896  * other use-cases too. So, until someone finds a better way to solve this,
7897  * let's keep things simple by re-using the existing slow path.
7898  */
7899 static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
7900 {
7901 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
7902 	unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
7903 	unsigned long p_util_min = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MIN) : 0;
7904 	unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024;
7905 	struct root_domain *rd = this_rq()->rd;
7906 	int cpu, best_energy_cpu, target = -1;
7907 	int prev_fits = -1, best_fits = -1;
7908 	unsigned long best_thermal_cap = 0;
7909 	unsigned long prev_thermal_cap = 0;
7910 	struct sched_domain *sd;
7911 	struct perf_domain *pd;
7912 	struct energy_env eenv;
7913 	int new_cpu = INT_MAX;
7914 
7915 	trace_android_rvh_find_energy_efficient_cpu(p, prev_cpu, sync, &new_cpu);
7916 	if (new_cpu != INT_MAX)
7917 		return new_cpu;
7918 
7919 	sync_entity_load_avg(&p->se);
7920 
7921 	rcu_read_lock();
7922 	pd = rcu_dereference(rd->pd);
7923 	if (!pd || READ_ONCE(rd->overutilized))
7924 		goto unlock;
7925 
7926 	cpu = smp_processor_id();
7927 	if (sync && cpu_rq(cpu)->nr_running == 1 &&
7928 	    cpumask_test_cpu(cpu, p->cpus_ptr) &&
7929 	    task_fits_cpu(p, cpu)) {
7930 		rcu_read_unlock();
7931 		return cpu;
7932 	}
7933 
7934 	/*
7935 	 * Energy-aware wake-up happens on the lowest sched_domain starting
7936 	 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
7937 	 */
7938 	sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity));
7939 	while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
7940 		sd = sd->parent;
7941 	if (!sd)
7942 		goto unlock;
7943 
7944 	target = prev_cpu;
7945 
7946 	sync_entity_load_avg(&p->se);
7947 	if (!task_util_est(p) && p_util_min == 0)
7948 		goto unlock;
7949 
7950 	eenv_task_busy_time(&eenv, p, prev_cpu);
7951 
7952 	for (; pd; pd = pd->next) {
7953 		unsigned long util_min = p_util_min, util_max = p_util_max;
7954 		unsigned long cpu_cap, cpu_thermal_cap, util;
7955 		long prev_spare_cap = -1, max_spare_cap = -1;
7956 		unsigned long rq_util_min, rq_util_max;
7957 		unsigned long cur_delta, base_energy;
7958 		int max_spare_cap_cpu = -1;
7959 		int fits, max_fits = -1;
7960 
7961 		cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask);
7962 
7963 		if (cpumask_empty(cpus))
7964 			continue;
7965 
7966 		/* Account thermal pressure for the energy estimation */
7967 		cpu = cpumask_first(cpus);
7968 		cpu_thermal_cap = arch_scale_cpu_capacity(cpu);
7969 		cpu_thermal_cap -= arch_scale_thermal_pressure(cpu);
7970 
7971 		eenv.cpu_cap = cpu_thermal_cap;
7972 		eenv.pd_cap = 0;
7973 
7974 		for_each_cpu(cpu, cpus) {
7975 			struct rq *rq = cpu_rq(cpu);
7976 
7977 			eenv.pd_cap += cpu_thermal_cap;
7978 
7979 			if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
7980 				continue;
7981 
7982 			if (!cpumask_test_cpu(cpu, p->cpus_ptr))
7983 				continue;
7984 
7985 			util = cpu_util(cpu, p, cpu, 0);
7986 			cpu_cap = capacity_of(cpu);
7987 
7988 			/*
7989 			 * Skip CPUs that cannot satisfy the capacity request.
7990 			 * IOW, placing the task there would make the CPU
7991 			 * overutilized. Take uclamp into account to see how
7992 			 * much capacity we can get out of the CPU; this is
7993 			 * aligned with sched_cpu_util().
7994 			 */
7995 			if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
7996 				/*
7997 				 * Open code uclamp_rq_util_with() except for
7998 				 * the clamp() part. Ie: apply max aggregation
7999 				 * only. util_fits_cpu() logic requires to
8000 				 * operate on non clamped util but must use the
8001 				 * max-aggregated uclamp_{min, max}.
8002 				 */
8003 				rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN);
8004 				rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX);
8005 
8006 				util_min = max(rq_util_min, p_util_min);
8007 				util_max = max(rq_util_max, p_util_max);
8008 			}
8009 
8010 			fits = util_fits_cpu(util, util_min, util_max, cpu);
8011 			if (!fits)
8012 				continue;
8013 
8014 			lsub_positive(&cpu_cap, util);
8015 
8016 			if (cpu == prev_cpu) {
8017 				/* Always use prev_cpu as a candidate. */
8018 				prev_spare_cap = cpu_cap;
8019 				prev_fits = fits;
8020 			} else if ((fits > max_fits) ||
8021 				   ((fits == max_fits) && ((long)cpu_cap > max_spare_cap))) {
8022 				/*
8023 				 * Find the CPU with the maximum spare capacity
8024 				 * among the remaining CPUs in the performance
8025 				 * domain.
8026 				 */
8027 				max_spare_cap = cpu_cap;
8028 				max_spare_cap_cpu = cpu;
8029 				max_fits = fits;
8030 			}
8031 		}
8032 
8033 		if (max_spare_cap_cpu < 0 && prev_spare_cap < 0)
8034 			continue;
8035 
8036 		eenv_pd_busy_time(&eenv, cpus, p);
8037 		/* Compute the 'base' energy of the pd, without @p */
8038 		base_energy = compute_energy(&eenv, pd, cpus, p, -1);
8039 
8040 		/* Evaluate the energy impact of using prev_cpu. */
8041 		if (prev_spare_cap > -1) {
8042 			prev_delta = compute_energy(&eenv, pd, cpus, p,
8043 						    prev_cpu);
8044 			/* CPU utilization has changed */
8045 			if (prev_delta < base_energy)
8046 				goto unlock;
8047 			prev_delta -= base_energy;
8048 			prev_thermal_cap = cpu_thermal_cap;
8049 			best_delta = min(best_delta, prev_delta);
8050 		}
8051 
8052 		/* Evaluate the energy impact of using max_spare_cap_cpu. */
8053 		if (max_spare_cap_cpu >= 0 && max_spare_cap > prev_spare_cap) {
8054 			/* Current best energy cpu fits better */
8055 			if (max_fits < best_fits)
8056 				continue;
8057 
8058 			/*
8059 			 * Both don't fit performance hint (i.e. uclamp_min)
8060 			 * but best energy cpu has better capacity.
8061 			 */
8062 			if ((max_fits < 0) &&
8063 			    (cpu_thermal_cap <= best_thermal_cap))
8064 				continue;
8065 
8066 			cur_delta = compute_energy(&eenv, pd, cpus, p,
8067 						   max_spare_cap_cpu);
8068 			/* CPU utilization has changed */
8069 			if (cur_delta < base_energy)
8070 				goto unlock;
8071 			cur_delta -= base_energy;
8072 
8073 			/*
8074 			 * Both fit for the task but best energy cpu has lower
8075 			 * energy impact.
8076 			 */
8077 			if ((max_fits > 0) && (best_fits > 0) &&
8078 			    (cur_delta >= best_delta))
8079 				continue;
8080 
8081 			best_delta = cur_delta;
8082 			best_energy_cpu = max_spare_cap_cpu;
8083 			best_fits = max_fits;
8084 			best_thermal_cap = cpu_thermal_cap;
8085 		}
8086 	}
8087 	rcu_read_unlock();
8088 
8089 	if ((best_fits > prev_fits) ||
8090 	    ((best_fits > 0) && (best_delta < prev_delta)) ||
8091 	    ((best_fits < 0) && (best_thermal_cap > prev_thermal_cap)))
8092 		target = best_energy_cpu;
8093 
8094 	return target;
8095 
8096 unlock:
8097 	rcu_read_unlock();
8098 
8099 	return target;
8100 }
8101 
8102 /*
8103  * select_task_rq_fair: Select target runqueue for the waking task in domains
8104  * that have the relevant SD flag set. In practice, this is SD_BALANCE_WAKE,
8105  * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
8106  *
8107  * Balances load by selecting the idlest CPU in the idlest group, or under
8108  * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set.
8109  *
8110  * Returns the target CPU number.
8111  */
8112 static int
8113 select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
8114 {
8115 	int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
8116 	struct sched_domain *tmp, *sd = NULL;
8117 	int cpu = smp_processor_id();
8118 	int new_cpu = prev_cpu;
8119 	int want_affine = 0;
8120 	int target_cpu = -1;
8121 	/* SD_flags and WF_flags share the first nibble */
8122 	int sd_flag = wake_flags & 0xF;
8123 
8124 	if (trace_android_rvh_select_task_rq_fair_enabled() &&
8125 	    !(sd_flag & SD_BALANCE_FORK))
8126 		sync_entity_load_avg(&p->se);
8127 	trace_android_rvh_select_task_rq_fair(p, prev_cpu, sd_flag,
8128 			wake_flags, &target_cpu);
8129 	if (target_cpu >= 0)
8130 		return target_cpu;
8131 
8132 	/*
8133 	 * required for stable ->cpus_allowed
8134 	 */
8135 	lockdep_assert_held(&p->pi_lock);
8136 	if (wake_flags & WF_TTWU) {
8137 		record_wakee(p);
8138 
8139 		if ((wake_flags & WF_CURRENT_CPU) &&
8140 		    cpumask_test_cpu(cpu, p->cpus_ptr))
8141 			return cpu;
8142 
8143 		if (sched_energy_enabled()) {
8144 			new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync);
8145 			if (new_cpu >= 0)
8146 				return new_cpu;
8147 			new_cpu = prev_cpu;
8148 		}
8149 
8150 		want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
8151 	}
8152 
8153 	rcu_read_lock();
8154 	for_each_domain(cpu, tmp) {
8155 		/*
8156 		 * If both 'cpu' and 'prev_cpu' are part of this domain,
8157 		 * cpu is a valid SD_WAKE_AFFINE target.
8158 		 */
8159 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
8160 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
8161 			if (cpu != prev_cpu)
8162 				new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
8163 
8164 			sd = NULL; /* Prefer wake_affine over balance flags */
8165 			break;
8166 		}
8167 
8168 		/*
8169 		 * Usually only true for WF_EXEC and WF_FORK, as sched_domains
8170 		 * usually do not have SD_BALANCE_WAKE set. That means wakeup
8171 		 * will usually go to the fast path.
8172 		 */
8173 		if (tmp->flags & sd_flag)
8174 			sd = tmp;
8175 		else if (!want_affine)
8176 			break;
8177 	}
8178 
8179 	if (unlikely(sd)) {
8180 		/* Slow path */
8181 		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
8182 	} else if (wake_flags & WF_TTWU) { /* XXX always ? */
8183 		/* Fast path */
8184 		new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
8185 	}
8186 	rcu_read_unlock();
8187 
8188 	return new_cpu;
8189 }
8190 
8191 /*
8192  * Called immediately before a task is migrated to a new CPU; task_cpu(p) and
8193  * cfs_rq_of(p) references at time of call are still valid and identify the
8194  * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
8195  */
8196 static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
8197 {
8198 	struct sched_entity *se = &p->se;
8199 
8200 	if (!task_on_rq_migrating(p)) {
8201 		remove_entity_load_avg(se);
8202 
8203 		/*
8204 		 * Here, the task's PELT values have been updated according to
8205 		 * the current rq's clock. But if that clock hasn't been
8206 		 * updated in a while, a substantial idle time will be missed,
8207 		 * leading to an inflation after wake-up on the new rq.
8208 		 *
8209 		 * Estimate the missing time from the cfs_rq last_update_time
8210 		 * and update sched_avg to improve the PELT continuity after
8211 		 * migration.
8212 		 */
8213 		migrate_se_pelt_lag(se);
8214 	}
8215 
8216 	/* Tell new CPU we are migrated */
8217 	se->avg.last_update_time = 0;
8218 
8219 	update_scan_period(p, new_cpu);
8220 }
8221 
8222 static void task_dead_fair(struct task_struct *p)
8223 {
8224 	remove_entity_load_avg(&p->se);
8225 }
8226 
8227 static int
8228 balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
8229 {
8230 	if (rq->nr_running)
8231 		return 1;
8232 
8233 	return newidle_balance(rq, rf) != 0;
8234 }
8235 #endif /* CONFIG_SMP */
8236 
8237 static void set_next_buddy(struct sched_entity *se)
8238 {
8239 	for_each_sched_entity(se) {
8240 		if (SCHED_WARN_ON(!se->on_rq))
8241 			return;
8242 		if (se_is_idle(se))
8243 			return;
8244 		cfs_rq_of(se)->next = se;
8245 	}
8246 }
8247 
8248 /*
8249  * Preempt the current task with a newly woken task if needed:
8250  */
8251 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
8252 {
8253 	struct task_struct *curr = rq->curr;
8254 	struct sched_entity *se = &curr->se, *pse = &p->se;
8255 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
8256 	int next_buddy_marked = 0;
8257 	int cse_is_idle, pse_is_idle;
8258 	bool ignore = false;
8259 	bool preempt = false;
8260 
8261 	if (unlikely(se == pse))
8262 		return;
8263 	trace_android_rvh_check_preempt_wakeup_ignore(curr, &ignore);
8264 	if (ignore)
8265 		return;
8266 
8267 	/*
8268 	 * This is possible from callers such as attach_tasks(), in which we
8269 	 * unconditionally call check_preempt_curr() after an enqueue (which
8270 	 * may have led to a throttle).  This both saves work and prevents false
8271 	 * next-buddy nomination below.
8272 	 */
8273 	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
8274 		return;
8275 
8276 	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK)) {
8277 		set_next_buddy(pse);
8278 		next_buddy_marked = 1;
8279 	}
8280 
8281 	/*
8282 	 * We can come here with TIF_NEED_RESCHED already set from the new
8283 	 * task wakeup path.
8284 	 *
8285 	 * Note: this also catches the edge-case of curr being in a throttled
8286 	 * group (e.g. via set_curr_task), since update_curr() (in the
8287 	 * enqueue of curr) will have resulted in resched being set.  This
8288 	 * prevents us from potentially nominating it as a false LAST_BUDDY
8289 	 * below.
8290 	 */
8291 	if (test_tsk_need_resched(curr))
8292 		return;
8293 
8294 	/* Idle tasks are by definition preempted by non-idle tasks. */
8295 	if (unlikely(task_has_idle_policy(curr)) &&
8296 	    likely(!task_has_idle_policy(p)))
8297 		goto preempt;
8298 
8299 	/*
8300 	 * Batch and idle tasks do not preempt non-idle tasks (their preemption
8301 	 * is driven by the tick):
8302 	 */
8303 	if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
8304 		return;
8305 
8306 	find_matching_se(&se, &pse);
8307 	WARN_ON_ONCE(!pse);
8308 
8309 	cse_is_idle = se_is_idle(se);
8310 	pse_is_idle = se_is_idle(pse);
8311 
8312 	/*
8313 	 * Preempt an idle group in favor of a non-idle group (and don't preempt
8314 	 * in the inverse case).
8315 	 */
8316 	if (cse_is_idle && !pse_is_idle)
8317 		goto preempt;
8318 	if (cse_is_idle != pse_is_idle)
8319 		return;
8320 
8321 	cfs_rq = cfs_rq_of(se);
8322 	update_curr(cfs_rq);
8323 
8324 	trace_android_rvh_check_preempt_wakeup(rq, p, &preempt, &ignore,
8325 				wake_flags, se, pse, next_buddy_marked);
8326 	if (preempt)
8327 		goto preempt;
8328 	if (ignore)
8329 		return;
8330 	/*
8331 	 * XXX pick_eevdf(cfs_rq) != se ?
8332 	 */
8333 	if (pick_eevdf(cfs_rq) == pse)
8334 		goto preempt;
8335 
8336 	return;
8337 
8338 preempt:
8339 	resched_curr(rq);
8340 }
8341 
8342 #ifdef CONFIG_SMP
8343 static struct task_struct *pick_task_fair(struct rq *rq)
8344 {
8345 	struct sched_entity *se;
8346 	struct cfs_rq *cfs_rq;
8347 
8348 again:
8349 	cfs_rq = &rq->cfs;
8350 	if (!cfs_rq->nr_running)
8351 		return NULL;
8352 
8353 	do {
8354 		struct sched_entity *curr = cfs_rq->curr;
8355 
8356 		/* When we pick for a remote RQ, we'll not have done put_prev_entity() */
8357 		if (curr) {
8358 			if (curr->on_rq)
8359 				update_curr(cfs_rq);
8360 			else
8361 				curr = NULL;
8362 
8363 			if (unlikely(check_cfs_rq_runtime(cfs_rq)))
8364 				goto again;
8365 		}
8366 
8367 		se = pick_next_entity(cfs_rq, curr);
8368 		cfs_rq = group_cfs_rq(se);
8369 	} while (cfs_rq);
8370 
8371 	return task_of(se);
8372 }
8373 #endif
8374 
8375 struct task_struct *
8376 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
8377 {
8378 	struct cfs_rq *cfs_rq = &rq->cfs;
8379 	struct sched_entity *se = NULL;
8380 	struct task_struct *p = NULL;
8381 	int new_tasks;
8382 	bool repick = false;
8383 
8384 again:
8385 	if (!sched_fair_runnable(rq))
8386 		goto idle;
8387 
8388 #ifdef CONFIG_FAIR_GROUP_SCHED
8389 	if (!prev || prev->sched_class != &fair_sched_class)
8390 		goto simple;
8391 
8392 	/*
8393 	 * likely that the next task is from the same cgroup as the current one.
8394 	 * likely that a next task is from the same cgroup as the current.
8395 	 *
8396 	 * Therefore attempt to avoid putting and setting the entire cgroup
8397 	 * hierarchy, only change the part that actually changes.
8398 	 */
8399 
8400 	do {
8401 		struct sched_entity *curr = cfs_rq->curr;
8402 
8403 		/*
8404 		 * Since we got here without doing put_prev_entity() we also
8405 		 * have to consider cfs_rq->curr. If it is still a runnable
8406 		 * entity, update_curr() will update its vruntime, otherwise
8407 		 * forget we've ever seen it.
8408 		 */
8409 		if (curr) {
8410 			if (curr->on_rq)
8411 				update_curr(cfs_rq);
8412 			else
8413 				curr = NULL;
8414 
8415 			/*
8416 			 * This call to check_cfs_rq_runtime() will do the
8417 			 * throttle and dequeue its entity in the parent(s).
8418 			 * Therefore the nr_running test will indeed
8419 			 * be correct.
8420 			 */
8421 			if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
8422 				cfs_rq = &rq->cfs;
8423 
8424 				if (!cfs_rq->nr_running)
8425 					goto idle;
8426 
8427 				goto simple;
8428 			}
8429 		}
8430 
8431 		se = pick_next_entity(cfs_rq, curr);
8432 		cfs_rq = group_cfs_rq(se);
8433 	} while (cfs_rq);
8434 
8435 	p = task_of(se);
8436 	trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, false, prev);
8437 	/*
8438 	 * Since we haven't yet done put_prev_entity(), if the selected task
8439 	 * is a different task than we started out with, try to touch the
8440 	 * fewest cfs_rqs possible.
8441 	 */
8442 	if (prev != p) {
8443 		struct sched_entity *pse = &prev->se;
8444 
8445 		while (!(cfs_rq = is_same_group(se, pse))) {
8446 			int se_depth = se->depth;
8447 			int pse_depth = pse->depth;
8448 
8449 			if (se_depth <= pse_depth) {
8450 				put_prev_entity(cfs_rq_of(pse), pse);
8451 				pse = parent_entity(pse);
8452 			}
8453 			if (se_depth >= pse_depth) {
8454 				set_next_entity(cfs_rq_of(se), se);
8455 				se = parent_entity(se);
8456 			}
8457 		}
8458 
8459 		put_prev_entity(cfs_rq, pse);
8460 		set_next_entity(cfs_rq, se);
8461 	}
8462 
8463 	goto done;
8464 simple:
8465 #endif
8466 	if (prev)
8467 		put_prev_task(rq, prev);
8468 
8469 	trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, true, prev);
8470 	if (repick)
8471 		goto done;
8472 
8473 	do {
8474 		se = pick_next_entity(cfs_rq, NULL);
8475 		set_next_entity(cfs_rq, se);
8476 		cfs_rq = group_cfs_rq(se);
8477 	} while (cfs_rq);
8478 
8479 	p = task_of(se);
8480 
8481 done: __maybe_unused;
8482 #ifdef CONFIG_SMP
8483 	/*
8484 	 * Move the next running task to the front of
8485 	 * the list, so that our cfs_tasks list becomes
8486 	 * an MRU one.
8487 	 */
8488 	list_move(&p->se.group_node, &rq->cfs_tasks);
8489 #endif
8490 
8491 	if (hrtick_enabled_fair(rq))
8492 		hrtick_start_fair(rq, p);
8493 
8494 	update_misfit_status(p, rq);
8495 	sched_fair_update_stop_tick(rq, p);
8496 
8497 	return p;
8498 
8499 idle:
8500 	if (!rf)
8501 		return NULL;
8502 
8503 	new_tasks = newidle_balance(rq, rf);
8504 
8505 	/*
8506 	 * Because newidle_balance() releases (and re-acquires) rq->lock, it is
8507 	 * possible for any higher priority task to appear. In that case we
8508 	 * must re-start the pick_next_entity() loop.
8509 	 */
8510 	if (new_tasks < 0)
8511 		return RETRY_TASK;
8512 
8513 	if (new_tasks > 0)
8514 		goto again;
8515 
8516 	/*
8517 	 * rq is about to be idle, check if we need to update the
8518 	 * lost_idle_time of clock_pelt
8519 	 */
8520 	update_idle_rq_clock_pelt(rq);
8521 
8522 	return NULL;
8523 }
8524 
8525 static struct task_struct *__pick_next_task_fair(struct rq *rq)
8526 {
8527 	return pick_next_task_fair(rq, NULL, NULL);
8528 }
8529 
8530 /*
8531  * Account for a descheduled task:
8532  */
8533 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
8534 {
8535 	struct sched_entity *se = &prev->se;
8536 	struct cfs_rq *cfs_rq;
8537 
8538 	for_each_sched_entity(se) {
8539 		cfs_rq = cfs_rq_of(se);
8540 		put_prev_entity(cfs_rq, se);
8541 	}
8542 }
8543 
8544 /*
8545  * sched_yield() is very simple
8546  */
8547 static void yield_task_fair(struct rq *rq)
8548 {
8549 	struct task_struct *curr = rq->curr;
8550 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
8551 	struct sched_entity *se = &curr->se;
8552 
8553 	/*
8554 	 * Are we the only task in the tree?
8555 	 */
8556 	if (unlikely(rq->nr_running == 1))
8557 		return;
8558 
8559 	clear_buddies(cfs_rq, se);
8560 
8561 	update_rq_clock(rq);
8562 	/*
8563 	 * Update run-time statistics of the 'current'.
8564 	 */
8565 	update_curr(cfs_rq);
8566 	/*
8567 	 * Tell update_rq_clock() that we've just updated,
8568 	 * so we don't do microscopic update in schedule()
8569 	 * and double the fastpath cost.
8570 	 */
8571 	rq_clock_skip_update(rq);
8572 
8573 	se->deadline += calc_delta_fair(se->slice, se);
8574 }
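
/*
 * A quick instance of the deadline bump above, assuming calc_delta_fair()
 * scales a delta by NICE_0_LOAD / se->load.weight (the identity for a
 * nice-0 task): with se->slice = 3 ms, a nice-0 yielder pushes its virtual
 * deadline 3 ms ahead, while an entity at twice the nice-0 weight only
 * moves ~1.5 ms. Either way the yielding entity drops behind its peers in
 * the EEVDF pick order without being dequeued.
 */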
8575 
8576 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
8577 {
8578 	struct sched_entity *se = &p->se;
8579 
8580 	/* throttled hierarchies are not runnable */
8581 	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
8582 		return false;
8583 
8584 	/* Tell the scheduler that we'd really like pse to run next. */
8585 	set_next_buddy(se);
8586 
8587 	yield_task_fair(rq);
8588 
8589 	return true;
8590 }
8591 
8592 #ifdef CONFIG_SMP
8593 /**************************************************
8594  * Fair scheduling class load-balancing methods.
8595  *
8596  * BASICS
8597  *
8598  * The purpose of load-balancing is to achieve the same basic fairness the
8599  * per-CPU scheduler provides, namely provide a proportional amount of compute
8600  * time to each task. This is expressed in the following equation:
8601  *
8602  *   W_i,n/P_i == W_j,n/P_j for all i,j                               (1)
8603  *
8604  * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight
8605  * W_i,0 is defined as:
8606  *
8607  *   W_i,0 = \Sum_j w_i,j                                             (2)
8608  *
8609  * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight
8610  * is derived from the nice value as per sched_prio_to_weight[].
8611  *
8612  * The weight average is an exponential decay average of the instantaneous
8613  * weight:
8614  *
8615  *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
8616  *
8617  * C_i is the compute capacity of CPU i; typically it is the
8618  * fraction of 'recent' time available for SCHED_OTHER task execution. But it
8619  * can also include other factors [XXX].
8620  *
8621  * To achieve this balance we define a measure of imbalance which follows
8622  * directly from (1):
8623  *
8624  *   imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j }    (4)
8625  *
8626  * We then move tasks around to minimize the imbalance. In the continuous
8627  * function space it is obvious this converges; in the discrete case we get
8628  * a few fun cases generally called infeasible weight scenarios.
8629  *
8630  * [XXX expand on:
8631  *     - infeasible weights;
8632  *     - local vs global optima in the discrete case. ]
8633  *
8634  *
8635  * SCHED DOMAINS
8636  *
8637  * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
8638  * for all i,j solution, we create a tree of CPUs that follows the hardware
8639  * topology where each level pairs two lower groups (or better). This results
8640  * in O(log n) layers. Furthermore we reduce the number of CPUs going up the
8641  * tree to only the first of the previous level and we decrease the frequency
8642  * of load-balance at each level in inverse proportion to the number of CPUs in
8643  * the groups.
8644  *
8645  * This yields:
8646  *
8647  *     log_2 n     1     n
8648  *   \Sum       { --- * --- * 2^i } = O(n)                            (5)
8649  *     i = 0      2^i   2^i
8650  *                               `- size of each group
8651  *         |         |     `- number of CPUs doing load-balance
8652  *         |         `- freq
8653  *         `- sum over all levels
8654  *
8655  * Coupled with a limit on how many tasks we can migrate every balance pass,
8656  * this makes (5) the runtime complexity of the balancer.
8657  *
8658  * An important property here is that each CPU is still (indirectly) connected
8659  * to every other CPU in at most O(log n) steps:
8660  *
8661  * The adjacency matrix of the resulting graph is given by:
8662  *
8663  *             log_2 n
8664  *   A_i,j = \Union     (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)  (6)
8665  *             k = 0
8666  *
8667  * And you'll find that:
8668  *
8669  *   A^(log_2 n)_i,j != 0  for all i,j                                (7)
8670  *
8671  * Showing there's indeed a path between every CPU in at most O(log n) steps.
8672  * The task movement gives a factor of O(m), giving a convergence complexity
8673  * of:
8674  *
8675  *   O(nm log n),  n := nr_cpus, m := nr_tasks                        (8)
8676  *
8677  *
8678  * WORK CONSERVING
8679  *
8680  * In order to avoid CPUs going idle while there's still work to do, new idle
8681  * balancing is more aggressive and has the newly idle CPU iterate up the domain
8682  * tree itself instead of relying on other CPUs to bring it work.
8683  *
8684  * This adds some complexity to both (5) and (8) but it reduces the total idle
8685  * time.
8686  *
8687  * [XXX more?]
8688  *
8689  *
8690  * CGROUPS
8691  *
8692  * Cgroups make a horror show out of (2); instead of a simple sum we get:
8693  *
8694  *                                s_k,i
8695  *   W_i,0 = \Sum_j \Prod_k w_k * -----                               (9)
8696  *                                 S_k
8697  *
8698  * Where
8699  *
8700  *   s_k,i = \Sum_j w_i,j,k  and  S_k = \Sum_i s_k,i                 (10)
8701  *
8702  * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i.
8703  *
8704  * The big problem is S_k: it's a global sum needed to compute a local (W_i)
8705  * property.
8706  *
8707  * [XXX write more on how we solve this.. _after_ merging pjt's patches that
8708  *      rewrite all of this once again.]
8709  */
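
/*
 * To make the O(n) claim in (5) concrete: each term of the sum is
 * (1/2^i) * (n/2^i) * 2^i = n/2^i, so for n = 8 CPUs:
 *
 *   i = 0:  8   (every CPU balances its lowest domain, every period)
 *   i = 1:  4
 *   i = 2:  2
 *   i = 3:  1   (a single CPU balances the whole machine, rarely)
 *   total: 15 < 2n
 *
 * i.e. the per-level halving of both the frequency and the number of
 * participating CPUs keeps the total balancing work linear in n.
 */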
8710 
8711 unsigned long __read_mostly max_load_balance_interval = HZ/10;
8712 EXPORT_SYMBOL_GPL(max_load_balance_interval);
8713 
8714 enum fbq_type { regular, remote, all };
8715 
8716 /*
8717  * 'group_type' describes the group of CPUs at the moment of load balancing.
8718  *
8719  * The enum is ordered by pulling priority, with the group with lowest priority
8720  * first so the group_type can simply be compared when selecting the busiest
8721  * group. See update_sd_pick_busiest().
8722  */
8723 enum group_type {
8724 	/* The group has spare capacity that can be used to run more tasks.  */
8725 	group_has_spare = 0,
8726 	/*
8727 	 * The group is fully used and the tasks don't compete for more CPU
8728 	 * cycles. Nevertheless, some tasks might wait before running.
8729 	 */
8730 	group_fully_busy,
8731 	/*
8732 	 * One task doesn't fit with CPU's capacity and must be migrated to a
8733 	 * more powerful CPU.
8734 	 */
8735 	group_misfit_task,
8736 	/*
8737 	 * Balance an SMT group that's fully busy. Can benefit from migrating
8738 	 * a task on an SMT CPU with a busy sibling to another CPU on an idle core.
8739 	 */
8740 	group_smt_balance,
8741 	/*
8742 	 * SD_ASYM_PACKING only: One local CPU with higher capacity is available,
8743 	 * and the task should be migrated to it instead of running on the
8744 	 * current CPU.
8745 	 */
8746 	group_asym_packing,
8747 	/*
8748 	 * The tasks' affinity constraints previously prevented the scheduler
8749 	 * from balancing the load across the system.
8750 	 */
8751 	group_imbalanced,
8752 	/*
8753 	 * The CPU is overloaded and can't provide expected CPU cycles to all
8754 	 * tasks.
8755 	 */
8756 	group_overloaded
8757 };
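
/*
 * Since the enum above is ordered by pulling priority, choosing between two
 * candidate busiest groups reduces to an integer comparison; this is the
 * idiom update_sd_pick_busiest() below builds on:
 *
 *	if (sgs->group_type > busiest->group_type)
 *		return true;	(candidate is busier: overloaded beats fully_busy)
 *	if (sgs->group_type < busiest->group_type)
 *		return false;
 *	... same type: fall through to type-specific tie-breakers ...
 */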
8758 
8759 enum migration_type {
8760 	migrate_load = 0,
8761 	migrate_util,
8762 	migrate_task,
8763 	migrate_misfit
8764 };
8765 
8766 #define LBF_ALL_PINNED	0x01
8767 #define LBF_NEED_BREAK	0x02
8768 #define LBF_DST_PINNED  0x04
8769 #define LBF_SOME_PINNED	0x08
8770 #define LBF_ACTIVE_LB	0x10
8771 
8772 struct lb_env {
8773 	struct sched_domain	*sd;
8774 
8775 	struct rq		*src_rq;
8776 	int			src_cpu;
8777 
8778 	int			dst_cpu;
8779 	struct rq		*dst_rq;
8780 
8781 	struct cpumask		*dst_grpmask;
8782 	int			new_dst_cpu;
8783 	enum cpu_idle_type	idle;
8784 	long			imbalance;
8785 	/* The set of CPUs under consideration for load-balancing */
8786 	struct cpumask		*cpus;
8787 
8788 	unsigned int		flags;
8789 
8790 	unsigned int		loop;
8791 	unsigned int		loop_break;
8792 	unsigned int		loop_max;
8793 
8794 	enum fbq_type		fbq_type;
8795 	enum migration_type	migration_type;
8796 	struct list_head	tasks;
8797 	struct rq_flags		*src_rq_rf;
8798 };
8799 
8800 /*
8801  * Is this task likely cache-hot:
8802  */
8803 static int task_hot(struct task_struct *p, struct lb_env *env)
8804 {
8805 	s64 delta;
8806 
8807 	lockdep_assert_rq_held(env->src_rq);
8808 
8809 	if (p->sched_class != &fair_sched_class)
8810 		return 0;
8811 
8812 	if (unlikely(task_has_idle_policy(p)))
8813 		return 0;
8814 
8815 	/* SMT siblings share cache */
8816 	if (env->sd->flags & SD_SHARE_CPUCAPACITY)
8817 		return 0;
8818 
8819 	/*
8820 	 * Buddy candidates are cache hot:
8821 	 */
8822 	if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
8823 	    (&p->se == cfs_rq_of(&p->se)->next))
8824 		return 1;
8825 
8826 	if (sysctl_sched_migration_cost == -1)
8827 		return 1;
8828 
8829 	/*
8830 	 * Don't migrate the task if its cookie does not match
8831 	 * the destination CPU's core cookie.
8832 	 */
8833 	if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p))
8834 		return 1;
8835 
8836 	if (sysctl_sched_migration_cost == 0)
8837 		return 0;
8838 
8839 	delta = rq_clock_task(env->src_rq) - p->se.exec_start;
8840 
8841 	return delta < (s64)sysctl_sched_migration_cost;
8842 }
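
/*
 * Rough numbers for the final test above, assuming the default
 * sysctl_sched_migration_cost of 500000 ns: a fair task that last started
 * executing on src_rq less than 0.5 ms ago is considered cache-hot and is
 * skipped unless the balancer is desperate (see can_migrate_task()). The
 * two special values keep their meaning from the checks above: -1 marks
 * every task hot, 0 marks none of them hot.
 */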
8843 
8844 #ifdef CONFIG_NUMA_BALANCING
8845 /*
8846  * Returns 1 if task migration degrades locality.
8847  * Returns 0 if task migration improves locality, i.e. migration is preferred.
8848  * Returns -1 if task migration is not affected by locality.
8849  */
8850 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
8851 {
8852 	struct numa_group *numa_group = rcu_dereference(p->numa_group);
8853 	unsigned long src_weight, dst_weight;
8854 	int src_nid, dst_nid, dist;
8855 
8856 	if (!static_branch_likely(&sched_numa_balancing))
8857 		return -1;
8858 
8859 	if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
8860 		return -1;
8861 
8862 	src_nid = cpu_to_node(env->src_cpu);
8863 	dst_nid = cpu_to_node(env->dst_cpu);
8864 
8865 	if (src_nid == dst_nid)
8866 		return -1;
8867 
8868 	/* Migrating away from the preferred node is always bad. */
8869 	if (src_nid == p->numa_preferred_nid) {
8870 		if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
8871 			return 1;
8872 		else
8873 			return -1;
8874 	}
8875 
8876 	/* Encourage migration to the preferred node. */
8877 	if (dst_nid == p->numa_preferred_nid)
8878 		return 0;
8879 
8880 	/* Leaving a core idle is often worse than degrading locality. */
8881 	if (env->idle == CPU_IDLE)
8882 		return -1;
8883 
8884 	dist = node_distance(src_nid, dst_nid);
8885 	if (numa_group) {
8886 		src_weight = group_weight(p, src_nid, dist);
8887 		dst_weight = group_weight(p, dst_nid, dist);
8888 	} else {
8889 		src_weight = task_weight(p, src_nid, dist);
8890 		dst_weight = task_weight(p, dst_nid, dist);
8891 	}
8892 
8893 	return dst_weight < src_weight;
8894 }
8895 
8896 #else
8897 static inline int migrate_degrades_locality(struct task_struct *p,
8898 					     struct lb_env *env)
8899 {
8900 	return -1;
8901 }
8902 #endif
8903 
8904 /*
8905  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
8906  */
8907 static
8908 int can_migrate_task(struct task_struct *p, struct lb_env *env)
8909 {
8910 	int tsk_cache_hot;
8911 	int can_migrate = 1;
8912 
8913 	lockdep_assert_rq_held(env->src_rq);
8914 
8915 	trace_android_rvh_can_migrate_task(p, env->dst_cpu, &can_migrate);
8916 	if (!can_migrate)
8917 		return 0;
8918 
8919 	/*
8920 	 * We do not migrate tasks that are:
8921 	 * 1) throttled_lb_pair, or
8922 	 * 2) cannot be migrated to this CPU due to cpus_ptr, or
8923 	 * 3) running (obviously), or
8924 	 * 4) are cache-hot on their current CPU.
8925 	 */
8926 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
8927 		return 0;
8928 
8929 	/* Disregard pcpu kthreads; they are where they need to be. */
8930 	if (kthread_is_per_cpu(p))
8931 		return 0;
8932 
8933 	if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
8934 		int cpu;
8935 
8936 		schedstat_inc(p->stats.nr_failed_migrations_affine);
8937 
8938 		env->flags |= LBF_SOME_PINNED;
8939 
8940 		/*
8941 		 * Remember if this task can be migrated to any other CPU in
8942 		 * our sched_group. We may want to revisit it if we couldn't
8943 		 * meet load balance goals by pulling other tasks on src_cpu.
8944 		 *
8945 		 * Avoid computing new_dst_cpu
8946 		 * - for NEWLY_IDLE
8947 		 * - if we have already computed one in current iteration
8948 		 * - if it's an active balance
8949 		 */
8950 		if (env->idle == CPU_NEWLY_IDLE ||
8951 		    env->flags & (LBF_DST_PINNED | LBF_ACTIVE_LB))
8952 			return 0;
8953 
8954 		/* Prevent re-selecting dst_cpu via env's CPUs: */
8955 		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
8956 			if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
8957 				env->flags |= LBF_DST_PINNED;
8958 				env->new_dst_cpu = cpu;
8959 				break;
8960 			}
8961 		}
8962 
8963 		return 0;
8964 	}
8965 
8966 	/* Record that we found at least one task that could run on dst_cpu */
8967 	env->flags &= ~LBF_ALL_PINNED;
8968 
8969 	if (task_on_cpu(env->src_rq, p)) {
8970 		schedstat_inc(p->stats.nr_failed_migrations_running);
8971 		return 0;
8972 	}
8973 
8974 	/*
8975 	 * Aggressive migration if:
8976 	 * 1) active balance
8977 	 * 2) destination numa is preferred
8978 	 * 3) task is cache cold, or
8979 	 * 4) too many balance attempts have failed.
8980 	 */
8981 	if (env->flags & LBF_ACTIVE_LB)
8982 		return 1;
8983 
8984 	tsk_cache_hot = migrate_degrades_locality(p, env);
8985 	if (tsk_cache_hot == -1)
8986 		tsk_cache_hot = task_hot(p, env);
8987 
8988 	if (tsk_cache_hot <= 0 ||
8989 	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
8990 		if (tsk_cache_hot == 1) {
8991 			schedstat_inc(env->sd->lb_hot_gained[env->idle]);
8992 			schedstat_inc(p->stats.nr_forced_migrations);
8993 		}
8994 		return 1;
8995 	}
8996 
8997 	schedstat_inc(p->stats.nr_failed_migrations_hot);
8998 	return 0;
8999 }
9000 
9001 /*
9002  * detach_task() -- detach the task for the migration specified in env
9003  */
9004 static void detach_task(struct task_struct *p, struct lb_env *env)
9005 {
9006 	int detached = 0;
9007 
9008 	lockdep_assert_rq_held(env->src_rq);
9009 
9010 	/*
9011 	 * The vendor hook may drop the lock temporarily, so
9012 	 * pass the rq flags to unpin the lock. We expect the
9013 	 * rq lock to be held after return.
9014 	 */
9015 	trace_android_rvh_migrate_queued_task(env->src_rq, env->src_rq_rf, p,
9016 					      env->dst_cpu, &detached);
9017 	if (detached)
9018 		return;
9019 
9020 	deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
9021 	set_task_cpu(p, env->dst_cpu);
9022 }
9023 
9024 /*
9025  * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
9026  * part of active balancing operations within "domain".
9027  *
9028  * Returns a task if successful and NULL otherwise.
9029  */
9030 static struct task_struct *detach_one_task(struct lb_env *env)
9031 {
9032 	struct task_struct *p;
9033 
9034 	lockdep_assert_rq_held(env->src_rq);
9035 
9036 	list_for_each_entry_reverse(p,
9037 			&env->src_rq->cfs_tasks, se.group_node) {
9038 		if (!can_migrate_task(p, env))
9039 			continue;
9040 
9041 		detach_task(p, env);
9042 
9043 		/*
9044 		 * Right now, this is only the second place where
9045 		 * lb_gained[env->idle] is updated (other is detach_tasks)
9046 		 * so we can safely collect stats here rather than
9047 		 * inside detach_tasks().
9048 		 */
9049 		schedstat_inc(env->sd->lb_gained[env->idle]);
9050 		return p;
9051 	}
9052 	return NULL;
9053 }
9054 
9055 /*
9056  * detach_tasks() -- tries to detach up to imbalance load/util/tasks from
9057  * busiest_rq, as part of a balancing operation within domain "sd".
9058  *
9059  * Returns number of detached tasks if successful and 0 otherwise.
9060  */
9061 static int detach_tasks(struct lb_env *env)
9062 {
9063 	struct list_head *tasks = &env->src_rq->cfs_tasks;
9064 	unsigned long util, load;
9065 	struct task_struct *p;
9066 	int detached = 0;
9067 
9068 	lockdep_assert_rq_held(env->src_rq);
9069 
9070 	/*
9071 	 * Source run queue has been emptied by another CPU, clear
9072 	 * LBF_ALL_PINNED flag as we will not test any task.
9073 	 */
9074 	if (env->src_rq->nr_running <= 1) {
9075 		env->flags &= ~LBF_ALL_PINNED;
9076 		return 0;
9077 	}
9078 
9079 	if (env->imbalance <= 0)
9080 		return 0;
9081 
9082 	while (!list_empty(tasks)) {
9083 		/*
9084 		 * We don't want to steal all the tasks; otherwise we may be
9085 		 * treated likewise, which could at worst lead to a livelock.
9086 		 */
9087 		if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
9088 			break;
9089 
9090 		env->loop++;
9091 		/* We've more or less seen every task there is, call it quits */
9092 		if (env->loop > env->loop_max)
9093 			break;
9094 
9095 		/* take a breather every nr_migrate tasks */
9096 		if (env->loop > env->loop_break) {
9097 			env->loop_break += SCHED_NR_MIGRATE_BREAK;
9098 			env->flags |= LBF_NEED_BREAK;
9099 			break;
9100 		}
9101 
9102 		p = list_last_entry(tasks, struct task_struct, se.group_node);
9103 
9104 		if (!can_migrate_task(p, env))
9105 			goto next;
9106 
9107 		switch (env->migration_type) {
9108 		case migrate_load:
9109 			/*
9110 			 * Depending on the number of CPUs and tasks and the
9111 			 * cgroup hierarchy, task_h_load() can return a null
9112 			 * value. Make sure that env->imbalance decreases
9113 			 * otherwise detach_tasks() will stop only after
9114 			 * detaching up to loop_max tasks.
9115 			 */
9116 			load = max_t(unsigned long, task_h_load(p), 1);
9117 
9118 			if (sched_feat(LB_MIN) &&
9119 			    load < 16 && !env->sd->nr_balance_failed)
9120 				goto next;
9121 
9122 			/*
9123 			 * Make sure that we don't migrate too much load.
9124 			 * Nevertheless, let's relax the constraint if the
9125 			 * scheduler fails to find a good waiting task to
9126 			 * migrate.
9127 			 */
9128 			if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
9129 				goto next;
9130 
9131 			env->imbalance -= load;
9132 			break;
9133 
9134 		case migrate_util:
9135 			util = task_util_est(p);
9136 
9137 			if (shr_bound(util, env->sd->nr_balance_failed) > env->imbalance)
9138 				goto next;
9139 
9140 			env->imbalance -= util;
9141 			break;
9142 
9143 		case migrate_task:
9144 			env->imbalance--;
9145 			break;
9146 
9147 		case migrate_misfit:
9148 			/* This is not a misfit task */
9149 			if (!is_misfit_task(p, cpu_rq(env->src_cpu), NULL))
9150 				goto next;
9151 
9152 			env->imbalance = 0;
9153 			break;
9154 		}
9155 
9156 		detach_task(p, env);
9157 		list_add(&p->se.group_node, &env->tasks);
9158 
9159 		detached++;
9160 
9161 #ifdef CONFIG_PREEMPTION
9162 		/*
9163 		 * NEWIDLE balancing is a source of latency, so preemptible
9164 		 * kernels will stop after the first task is detached to minimize
9165 		 * the critical section.
9166 		 */
9167 		if (env->idle == CPU_NEWLY_IDLE)
9168 			break;
9169 #endif
9170 
9171 		/*
9172 		 * We only want to steal up to the prescribed amount of
9173 		 * load/util/tasks.
9174 		 */
9175 		if (env->imbalance <= 0)
9176 			break;
9177 
9178 		continue;
9179 next:
9180 		list_move(&p->se.group_node, tasks);
9181 	}
9182 
9183 	/*
9184 	 * Right now, this is one of only two places we collect this stat
9185 	 * so we can safely collect detach_one_task() stats here rather
9186 	 * than inside detach_one_task().
9187 	 */
9188 	schedstat_add(env->sd->lb_gained[env->idle], detached);
9189 
9190 	return detached;
9191 }
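
/*
 * A sketch of the migrate_load budgeting above, with illustrative numbers:
 * env->imbalance = 1024 and two candidates with task_h_load() of 700 and
 * 600. The first passes shr_bound(700, 0) = 700 <= 1024, is detached and
 * leaves env->imbalance = 324. The second fails 600 > 324 and stays put;
 * only once nr_balance_failed grows does shr_bound() halve the task's
 * load per failed attempt (600 >> 1 = 300 <= 324) and let it through.
 */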
9192 
9193 /*
9194  * attach_task() -- attach the task detached by detach_task() to its new rq.
9195  */
9196 static void attach_task(struct rq *rq, struct task_struct *p)
9197 {
9198 	lockdep_assert_rq_held(rq);
9199 
9200 	WARN_ON_ONCE(task_rq(p) != rq);
9201 	activate_task(rq, p, ENQUEUE_NOCLOCK);
9202 	check_preempt_curr(rq, p, 0);
9203 }
9204 
9205 /*
9206  * attach_one_task() -- attaches the task returned from detach_one_task() to
9207  * its new rq.
9208  */
9209 static void attach_one_task(struct rq *rq, struct task_struct *p)
9210 {
9211 	struct rq_flags rf;
9212 
9213 	rq_lock(rq, &rf);
9214 	update_rq_clock(rq);
9215 	attach_task(rq, p);
9216 	rq_unlock(rq, &rf);
9217 }
9218 
9219 /*
9220  * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
9221  * new rq.
9222  */
9223 static void attach_tasks(struct lb_env *env)
9224 {
9225 	struct list_head *tasks = &env->tasks;
9226 	struct task_struct *p;
9227 	struct rq_flags rf;
9228 
9229 	rq_lock(env->dst_rq, &rf);
9230 	update_rq_clock(env->dst_rq);
9231 
9232 	while (!list_empty(tasks)) {
9233 		p = list_first_entry(tasks, struct task_struct, se.group_node);
9234 		list_del_init(&p->se.group_node);
9235 
9236 		attach_task(env->dst_rq, p);
9237 	}
9238 
9239 	rq_unlock(env->dst_rq, &rf);
9240 }
9241 
9242 #ifdef CONFIG_NO_HZ_COMMON
9243 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
9244 {
9245 	if (cfs_rq->avg.load_avg)
9246 		return true;
9247 
9248 	if (cfs_rq->avg.util_avg)
9249 		return true;
9250 
9251 	return false;
9252 }
9253 
9254 static inline bool others_have_blocked(struct rq *rq)
9255 {
9256 	if (READ_ONCE(rq->avg_rt.util_avg))
9257 		return true;
9258 
9259 	if (READ_ONCE(rq->avg_dl.util_avg))
9260 		return true;
9261 
9262 	if (thermal_load_avg(rq))
9263 		return true;
9264 
9265 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
9266 	if (READ_ONCE(rq->avg_irq.util_avg))
9267 		return true;
9268 #endif
9269 
9270 	return false;
9271 }
9272 
9273 static inline void update_blocked_load_tick(struct rq *rq)
9274 {
9275 	WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
9276 }
9277 
9278 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
9279 {
9280 	if (!has_blocked)
9281 		rq->has_blocked_load = 0;
9282 }
9283 #else
9284 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
9285 static inline bool others_have_blocked(struct rq *rq) { return false; }
9286 static inline void update_blocked_load_tick(struct rq *rq) {}
9287 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
9288 #endif
9289 
9290 static bool __update_blocked_others(struct rq *rq, bool *done)
9291 {
9292 	const struct sched_class *curr_class;
9293 	u64 now = rq_clock_pelt(rq);
9294 	unsigned long thermal_pressure;
9295 	bool decayed;
9296 
9297 	/*
9298 	 * update_load_avg() can call cpufreq_update_util(). Make sure that RT,
9299 	 * DL and IRQ signals have been updated before updating CFS.
9300 	 */
9301 	curr_class = rq->curr->sched_class;
9302 
9303 	thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
9304 
9305 	decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
9306 		  update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
9307 		  update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) |
9308 		  update_irq_load_avg(rq, 0);
9309 
9310 	if (others_have_blocked(rq))
9311 		*done = false;
9312 
9313 	return decayed;
9314 }
9315 
9316 #ifdef CONFIG_FAIR_GROUP_SCHED
9317 
9318 static bool __update_blocked_fair(struct rq *rq, bool *done)
9319 {
9320 	struct cfs_rq *cfs_rq, *pos;
9321 	bool decayed = false;
9322 	int cpu = cpu_of(rq);
9323 
9324 	trace_android_rvh_update_blocked_fair(rq);
9325 
9326 	/*
9327 	 * Iterates the task_group tree in a bottom up fashion, see
9328 	 * list_add_leaf_cfs_rq() for details.
9329 	 */
9330 	for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
9331 		struct sched_entity *se;
9332 
9333 		if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
9334 			update_tg_load_avg(cfs_rq);
9335 
9336 			if (cfs_rq->nr_running == 0)
9337 				update_idle_cfs_rq_clock_pelt(cfs_rq);
9338 
9339 			if (cfs_rq == &rq->cfs)
9340 				decayed = true;
9341 		}
9342 
9343 		/* Propagate pending load changes to the parent, if any: */
9344 		se = cfs_rq->tg->se[cpu];
9345 		if (se && !skip_blocked_update(se))
9346 			update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
9347 
9348 		/*
9349 		 * There can be a lot of idle CPU cgroups.  Don't let fully
9350 		 * decayed cfs_rqs linger on the list.
9351 		 */
9352 		if (cfs_rq_is_decayed(cfs_rq))
9353 			list_del_leaf_cfs_rq(cfs_rq);
9354 
9355 		/* Don't need periodic decay once load/util_avg are null */
9356 		if (cfs_rq_has_blocked(cfs_rq))
9357 			*done = false;
9358 	}
9359 
9360 	return decayed;
9361 }
9362 
9363 /*
9364  * Compute the hierarchical load factor for cfs_rq and all its ancestors.
9365  * This needs to be done in a top-down fashion because the load of a child
9366  * group is a fraction of its parent's load.
9367  */
9368 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
9369 {
9370 	struct rq *rq = rq_of(cfs_rq);
9371 	struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
9372 	unsigned long now = jiffies;
9373 	unsigned long load;
9374 
9375 	if (cfs_rq->last_h_load_update == now)
9376 		return;
9377 
9378 	WRITE_ONCE(cfs_rq->h_load_next, NULL);
9379 	for_each_sched_entity(se) {
9380 		cfs_rq = cfs_rq_of(se);
9381 		WRITE_ONCE(cfs_rq->h_load_next, se);
9382 		if (cfs_rq->last_h_load_update == now)
9383 			break;
9384 	}
9385 
9386 	if (!se) {
9387 		cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
9388 		cfs_rq->last_h_load_update = now;
9389 	}
9390 
9391 	while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
9392 		load = cfs_rq->h_load;
9393 		load = div64_ul(load * se->avg.load_avg,
9394 			cfs_rq_load_avg(cfs_rq) + 1);
9395 		cfs_rq = group_cfs_rq(se);
9396 		cfs_rq->h_load = load;
9397 		cfs_rq->last_h_load_update = now;
9398 	}
9399 }
9400 
9401 static unsigned long task_h_load(struct task_struct *p)
9402 {
9403 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
9404 
9405 	update_cfs_rq_h_load(cfs_rq);
9406 	return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
9407 			cfs_rq_load_avg(cfs_rq) + 1);
9408 }
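
/*
 * A worked example of the top-down propagation, with illustrative PELT
 * numbers: the root cfs_rq has load_avg = 2048, hence h_load = 2048. A
 * group entity with avg.load_avg = 512 enqueued there gives its cfs_rq
 *
 *	h_load = 2048 * 512 / (2048 + 1) ~= 511
 *
 * and a task with avg.load_avg = 256 on that group cfs_rq (load_avg = 512)
 * yields
 *
 *	task_h_load(p) = 256 * 511 / (512 + 1) ~= 255
 *
 * i.e. the task owns half of a group that owns a quarter of the rq's load,
 * so roughly an eighth of 2048.
 */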
9409 #else
9410 static bool __update_blocked_fair(struct rq *rq, bool *done)
9411 {
9412 	struct cfs_rq *cfs_rq = &rq->cfs;
9413 	bool decayed;
9414 
9415 	decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
9416 	if (cfs_rq_has_blocked(cfs_rq))
9417 		*done = false;
9418 
9419 	return decayed;
9420 }
9421 
9422 static unsigned long task_h_load(struct task_struct *p)
9423 {
9424 	return p->se.avg.load_avg;
9425 }
9426 #endif
9427 
9428 static void update_blocked_averages(int cpu)
9429 {
9430 	bool decayed = false, done = true;
9431 	struct rq *rq = cpu_rq(cpu);
9432 	struct rq_flags rf;
9433 
9434 	rq_lock_irqsave(rq, &rf);
9435 	update_blocked_load_tick(rq);
9436 	update_rq_clock(rq);
9437 
9438 	decayed |= __update_blocked_others(rq, &done);
9439 	decayed |= __update_blocked_fair(rq, &done);
9440 
9441 	update_blocked_load_status(rq, !done);
9442 	if (decayed)
9443 		cpufreq_update_util(rq, 0);
9444 	rq_unlock_irqrestore(rq, &rf);
9445 }
9446 
9447 /********** Helpers for find_busiest_group ************************/
9448 
9449 /*
9450  * sg_lb_stats - stats of a sched_group required for load_balancing
9451  */
9452 struct sg_lb_stats {
9453 	unsigned long avg_load; /* Avg load across the CPUs of the group */
9454 	unsigned long group_load; /* Total load over the CPUs of the group */
9455 	unsigned long group_capacity;
9456 	unsigned long group_util; /* Total utilization over the CPUs of the group */
9457 	unsigned long group_runnable; /* Total runnable time over the CPUs of the group */
9458 	unsigned int sum_nr_running; /* Nr of tasks running in the group */
9459 	unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
9460 	unsigned int idle_cpus;
9461 	unsigned int group_weight;
9462 	enum group_type group_type;
9463 	unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
9464 	unsigned int group_smt_balance;  /* Task on busy SMT be moved */
9465 	unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
9466 	misfit_reason_t group_misfit_reason;
9467 #ifdef CONFIG_NUMA_BALANCING
9468 	unsigned int nr_numa_running;
9469 	unsigned int nr_preferred_running;
9470 #endif
9471 };
9472 
9473 /*
9474  * sd_lb_stats - Structure to store the statistics of a sched_domain
9475  *		 during load balancing.
9476  */
9477 struct sd_lb_stats {
9478 	struct sched_group *busiest;	/* Busiest group in this sd */
9479 	struct sched_group *local;	/* Local group in this sd */
9480 	unsigned long total_load;	/* Total load of all groups in sd */
9481 	unsigned long total_capacity;	/* Total capacity of all groups in sd */
9482 	unsigned long avg_load;	/* Average load across all groups in sd */
9483 	unsigned int prefer_sibling; /* tasks should go to sibling first */
9484 
9485 	struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
9486 	struct sg_lb_stats local_stat;	/* Statistics of the local group */
9487 };
9488 
9489 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
9490 {
9491 	/*
9492 	 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
9493 	 * local_stat because update_sg_lb_stats() does a full clear/assignment.
9494 	 * We must however set busiest_stat::group_type and
9495 	 * busiest_stat::idle_cpus to the worst busiest group because
9496 	 * update_sd_pick_busiest() reads these before assignment.
9497 	 */
9498 	*sds = (struct sd_lb_stats){
9499 		.busiest = NULL,
9500 		.local = NULL,
9501 		.total_load = 0UL,
9502 		.total_capacity = 0UL,
9503 		.busiest_stat = {
9504 			.idle_cpus = UINT_MAX,
9505 			.group_type = group_has_spare,
9506 		},
9507 	};
9508 }
9509 
9510 static unsigned long scale_rt_capacity(int cpu)
9511 {
9512 	struct rq *rq = cpu_rq(cpu);
9513 	unsigned long max = arch_scale_cpu_capacity(cpu);
9514 	unsigned long used, free;
9515 	unsigned long irq;
9516 
9517 	irq = cpu_util_irq(rq);
9518 
9519 	if (unlikely(irq >= max))
9520 		return 1;
9521 
9522 	/*
9523 	 * avg_rt.util_avg and avg_dl.util_avg track binary signals
9524 	 * (running and not running) with weights 0 and 1024 respectively.
9525 	 * avg_thermal.load_avg tracks thermal pressure and the weighted
9526 	 * average uses the actual delta max capacity (load).
9527 	 */
9528 	used = READ_ONCE(rq->avg_rt.util_avg);
9529 	used += READ_ONCE(rq->avg_dl.util_avg);
9530 	used += thermal_load_avg(rq);
9531 
9532 	if (unlikely(used >= max))
9533 		return 1;
9534 
9535 	free = max - used;
9536 
9537 	return scale_irq_capacity(free, irq, max);
9538 }
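
/*
 * A numeric sketch, assuming scale_irq_capacity() scales the remainder by
 * (max - irq) / max: with max = 1024, irq = 64 and rt + dl + thermal
 * pressure summing to 160,
 *
 *	free = 1024 - 160 = 864
 *	capacity = 864 * (1024 - 64) / 1024 = 810
 *
 * so about a fifth of the CPU is invisible to CFS here, and the capacity
 * code below sizes the CPU accordingly for load balancing.
 */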
9539 
9540 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
9541 {
9542 	unsigned long capacity = scale_rt_capacity(cpu);
9543 	struct sched_group *sdg = sd->groups;
9544 
9545 	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
9546 
9547 	if (!capacity)
9548 		capacity = 1;
9549 
9550 	trace_android_rvh_update_cpu_capacity(cpu, &capacity);
9551 	cpu_rq(cpu)->cpu_capacity = capacity;
9552 	trace_sched_cpu_capacity_tp(cpu_rq(cpu));
9553 
9554 	sdg->sgc->capacity = capacity;
9555 	sdg->sgc->min_capacity = capacity;
9556 	sdg->sgc->max_capacity = capacity;
9557 }
9558 
9559 void update_group_capacity(struct sched_domain *sd, int cpu)
9560 {
9561 	struct sched_domain *child = sd->child;
9562 	struct sched_group *group, *sdg = sd->groups;
9563 	unsigned long capacity, min_capacity, max_capacity;
9564 	unsigned long interval;
9565 
9566 	interval = msecs_to_jiffies(sd->balance_interval);
9567 	interval = clamp(interval, 1UL, max_load_balance_interval);
9568 	sdg->sgc->next_update = jiffies + interval;
9569 
9570 	if (!child) {
9571 		update_cpu_capacity(sd, cpu);
9572 		return;
9573 	}
9574 
9575 	capacity = 0;
9576 	min_capacity = ULONG_MAX;
9577 	max_capacity = 0;
9578 
9579 	if (child->flags & SD_OVERLAP) {
9580 		/*
9581 		 * SD_OVERLAP domains cannot assume that child groups
9582 		 * span the current group.
9583 		 */
9584 
9585 		for_each_cpu(cpu, sched_group_span(sdg)) {
9586 			unsigned long cpu_cap = capacity_of(cpu);
9587 
9588 			capacity += cpu_cap;
9589 			min_capacity = min(cpu_cap, min_capacity);
9590 			max_capacity = max(cpu_cap, max_capacity);
9591 		}
9592 	} else  {
9593 		/*
9594 		 * !SD_OVERLAP domains can assume that child groups
9595 		 * span the current group.
9596 		 */
9597 
9598 		group = child->groups;
9599 		do {
9600 			struct sched_group_capacity *sgc = group->sgc;
9601 
9602 			capacity += sgc->capacity;
9603 			min_capacity = min(sgc->min_capacity, min_capacity);
9604 			max_capacity = max(sgc->max_capacity, max_capacity);
9605 			group = group->next;
9606 		} while (group != child->groups);
9607 	}
9608 
9609 	sdg->sgc->capacity = capacity;
9610 	sdg->sgc->min_capacity = min_capacity;
9611 	sdg->sgc->max_capacity = max_capacity;
9612 }
9613 
9614 /*
9615  * Check whether the capacity of the rq has been noticeably reduced by side
9616  * activity. The imbalance_pct is used for the threshold.
9617  * Return true if the capacity is reduced.
9618  */
9619 static inline int
9620 check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
9621 {
9622 	return ((rq->cpu_capacity * sd->imbalance_pct) <
9623 				(rq->cpu_capacity_orig * 100));
9624 }
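
/*
 * Numerically, with cpu_capacity_orig = 1024 and an illustrative
 * sd->imbalance_pct of 117, the check reads:
 *
 *	cpu_capacity * 117 < 1024 * 100  <=>  cpu_capacity < ~875
 *
 * i.e. the rq counts as capacity-reduced once side activity (RT/DL/IRQ/
 * thermal, see scale_rt_capacity()) has consumed roughly 15% of the
 * original capacity.
 */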
9625 
9626 /*
9627  * Check whether a rq has a misfit task and if it looks like we can actually
9628  * help that task: we can migrate the task to a CPU of higher capacity, or
9629  * the task's current CPU is heavily pressured.
9630  */
9631 static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
9632 {
9633 	return rq->misfit_task_load &&
9634 		(rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
9635 		 check_cpu_capacity(rq, sd));
9636 }
9637 
9638 /*
9639  * Group imbalance indicates (and tries to solve) the problem where balancing
9640  * groups is inadequate due to ->cpus_ptr constraints.
9641  *
9642  * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
9643  * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
9644  * Something like:
9645  *
9646  *	{ 0 1 2 3 } { 4 5 6 7 }
9647  *	        *     * * *
9648  *
9649  * If we were to balance group-wise we'd place two tasks in the first group and
9650  * two tasks in the second group. Clearly this is undesired as it will overload
9651  * cpu 3 and leave one of the CPUs in the second group unused.
9652  *
9653  * The current solution to this issue is detecting the skew in the first group
9654  * by noticing the lower domain failed to reach balance and had difficulty
9655  * moving tasks due to affinity constraints.
9656  *
9657  * When this is detected, the group becomes a candidate for busiest; see
9658  * update_sd_pick_busiest(). calculate_imbalance() and find_busiest_group()
9659  * then avoid some of the usual balance conditions to allow it
9660  * to create an effective group imbalance.
9661  *
9662  * This is a somewhat tricky proposition since the next run might not find the
9663  * group imbalance and decide the groups need to be balanced again. A most
9664  * subtle and fragile situation.
9665  */
9666 
9667 static inline int sg_imbalanced(struct sched_group *group)
9668 {
9669 	return group->sgc->imbalance;
9670 }
9671 
9672 /*
9673  * group_has_capacity returns true if the group has spare capacity that could
9674  * be used by some tasks.
9675  * We consider that a group has spare capacity if the number of task is
9676  * smaller than the number of CPUs or if the utilization is lower than the
9677  * available capacity for CFS tasks.
9678  * For the latter, we use a threshold to stabilize the state, to take into
9679  * account the variance of the tasks' load and to return true if the available
9680  * capacity is meaningful for the load balancer.
9681  * As an example, an available capacity of 1% can appear but provides no
9682  * benefit for load balancing.
9683  */
9684 static inline bool
9685 group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
9686 {
9687 	if (sgs->sum_nr_running < sgs->group_weight)
9688 		return true;
9689 
9690 	if ((sgs->group_capacity * imbalance_pct) <
9691 			(sgs->group_runnable * 100))
9692 		return false;
9693 
9694 	if ((sgs->group_capacity * 100) >
9695 			(sgs->group_util * imbalance_pct))
9696 		return true;
9697 
9698 	return false;
9699 }
9700 
9701 /*
9702  *  group_is_overloaded returns true if the group has more tasks than it can
9703  *  handle.
9704  *  group_is_overloaded is not equal to !group_has_capacity because a group
9705  *  with exactly the right number of tasks has no spare capacity left but is
9706  *  not overloaded, so both group_has_capacity and group_is_overloaded return
9707  *  false.
9708  */
9709 static inline bool
9710 group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
9711 {
9712 	if (sgs->sum_nr_running <= sgs->group_weight)
9713 		return false;
9714 
9715 	if ((sgs->group_capacity * 100) <
9716 			(sgs->group_util * imbalance_pct))
9717 		return true;
9718 
9719 	if ((sgs->group_capacity * imbalance_pct) <
9720 			(sgs->group_runnable * 100))
9721 		return true;
9722 
9723 	return false;
9724 }
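
/*
 * Worked numbers for the two helpers above, ignoring the group_runnable
 * checks and taking imbalance_pct = 125, group_weight = 4 and
 * group_capacity = 4096:
 *
 *	3 tasks                  -> spare capacity (sum_nr_running < weight)
 *	4 tasks, group_util 3500 -> neither: 4096 * 100 < 3500 * 125 means no
 *	                            spare capacity, yet sum_nr_running does
 *	                            not exceed the weight, so the group is
 *	                            merely fully busy
 *	6 tasks, group_util 3800 -> overloaded: sum_nr_running > weight and
 *	                            4096 * 100 < 3800 * 125
 */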
9725 
9726 static inline enum
9727 group_type group_classify(unsigned int imbalance_pct,
9728 			  struct sched_group *group,
9729 			  struct sg_lb_stats *sgs)
9730 {
9731 	if (group_is_overloaded(imbalance_pct, sgs))
9732 		return group_overloaded;
9733 
9734 	if (sg_imbalanced(group))
9735 		return group_imbalanced;
9736 
9737 	if (sgs->group_asym_packing)
9738 		return group_asym_packing;
9739 
9740 	if (sgs->group_smt_balance)
9741 		return group_smt_balance;
9742 
9743 	if (sgs->group_misfit_task_load)
9744 		return group_misfit_task;
9745 
9746 	if (!group_has_capacity(imbalance_pct, sgs))
9747 		return group_fully_busy;
9748 
9749 	return group_has_spare;
9750 }
9751 
9752 /**
9753  * sched_use_asym_prio - Check whether asym_packing priority must be used
9754  * @sd:		The scheduling domain of the load balancing
9755  * @cpu:	A CPU
9756  *
9757  * Always use CPU priority when balancing load between SMT siblings. When
9758  * balancing load between cores, it is not sufficient that @cpu is idle. Only
9759  * use CPU priority if the whole core is idle.
9760  *
9761  * Returns: True if the priority of @cpu must be followed. False otherwise.
9762  */
9763 static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
9764 {
9765 	if (!sched_smt_active())
9766 		return true;
9767 
9768 	return sd->flags & SD_SHARE_CPUCAPACITY || is_core_idle(cpu);
9769 }
9770 
9771 /**
9772  * sched_asym - Check if the destination CPU can do asym_packing load balance
9773  * @env:	The load balancing environment
9774  * @sds:	Load-balancing data with statistics of the local group
9775  * @sgs:	Load-balancing statistics of the candidate busiest group
9776  * @group:	The candidate busiest group
9777  *
9778  * @env::dst_cpu can do asym_packing if it has higher priority than the
9779  * preferred CPU of @group.
9780  *
9781  * SMT is a special case. If we are balancing load between cores, @env::dst_cpu
9782  * can do asym_packing balance only if all its SMT siblings are idle. Also, it
9783  * can only do it if @group is an SMT group and has exactly one busy CPU. Larger
9784  * imbalances in the number of CPUs are dealt with in find_busiest_group().
9785  *
9786  * If we are balancing load within an SMT core, or at DIE domain level, always
9787  * proceed.
9788  *
9789  * Return: true if @env::dst_cpu can do asym_packing load balance. False
9790  * otherwise.
9791  */
9792 static inline bool
9793 sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs,
9794 	   struct sched_group *group)
9795 {
9796 	/* Ensure that the whole local core is idle, if applicable. */
9797 	if (!sched_use_asym_prio(env->sd, env->dst_cpu))
9798 		return false;
9799 
9800 	/*
9801 	 * CPU priorities do not make sense for SMT cores with more than one
9802 	 * busy sibling.
9803 	 */
9804 	if (group->flags & SD_SHARE_CPUCAPACITY) {
9805 		if (sgs->group_weight - sgs->idle_cpus != 1)
9806 			return false;
9807 	}
9808 
9809 	return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
9810 }
9811 
9812 /* One group has more than one SMT CPU while the other group does not */
9813 static inline bool smt_vs_nonsmt_groups(struct sched_group *sg1,
9814 				    struct sched_group *sg2)
9815 {
9816 	if (!sg1 || !sg2)
9817 		return false;
9818 
9819 	return (sg1->flags & SD_SHARE_CPUCAPACITY) !=
9820 		(sg2->flags & SD_SHARE_CPUCAPACITY);
9821 }
9822 
9823 static inline bool smt_balance(struct lb_env *env, struct sg_lb_stats *sgs,
9824 			       struct sched_group *group)
9825 {
9826 	if (env->idle == CPU_NOT_IDLE)
9827 		return false;
9828 
9829 	/*
9830 	 * For an SMT source group, it is better to move a task
9831 	 * to a CPU that doesn't have multiple tasks sharing its CPU capacity.
9832 	 * Note that if a core has only a single SMT sibling, SD_SHARE_CPUCAPACITY
9833 	 * will not be set.
9834 	 */
9835 	if (group->flags & SD_SHARE_CPUCAPACITY &&
9836 	    sgs->sum_h_nr_running > 1)
9837 		return true;
9838 
9839 	return false;
9840 }
9841 
9842 static inline long sibling_imbalance(struct lb_env *env,
9843 				    struct sd_lb_stats *sds,
9844 				    struct sg_lb_stats *busiest,
9845 				    struct sg_lb_stats *local)
9846 {
9847 	int ncores_busiest, ncores_local;
9848 	long imbalance;
9849 
9850 	if (env->idle == CPU_NOT_IDLE || !busiest->sum_nr_running)
9851 		return 0;
9852 
9853 	ncores_busiest = sds->busiest->cores;
9854 	ncores_local = sds->local->cores;
9855 
9856 	if (ncores_busiest == ncores_local) {
9857 		imbalance = busiest->sum_nr_running;
9858 		lsub_positive(&imbalance, local->sum_nr_running);
9859 		return imbalance;
9860 	}
9861 
9862 	/* Balance such that the nr_running/ncores ratios are the same in both groups */
9863 	imbalance = ncores_local * busiest->sum_nr_running;
9864 	lsub_positive(&imbalance, ncores_busiest * local->sum_nr_running);
9865 	/* Normalize the imbalance, rounding during the normalization */
9866 	imbalance = 2 * imbalance + ncores_local + ncores_busiest;
9867 	imbalance /= ncores_local + ncores_busiest;
9868 
9869 	/* Take advantage of the resources in an empty sched group */
9870 	if (imbalance <= 1 && local->sum_nr_running == 0 &&
9871 	    busiest->sum_nr_running > 1)
9872 		imbalance = 2;
9873 
9874 	return imbalance;
9875 }
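
/*
 * Tracing the arithmetic above with illustrative numbers: ncores_local = 2,
 * ncores_busiest = 4, busiest->sum_nr_running = 6, local->sum_nr_running = 1
 * (per-core ratios of 1.5 vs 0.5):
 *
 *	imbalance = 2 * 6 - 4 * 1             = 8
 *	imbalance = (2 * 8 + 2 + 4) / (2 + 4) = 3
 *
 * The factor of two folded into the normalization suggests the caller works
 * in half-task units (see calculate_imbalance()); moving one task brings
 * the ratios to 1.25 vs 1.0. With equal core counts the function collapses
 * to the plain difference of the running counts.
 */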
9876 
9877 static inline bool
9878 sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
9879 {
9880 	/*
9881 	 * When there is more than 1 task, the group_overloaded case already
9882 	 * takes care of CPUs with reduced capacity.
9883 	 */
9884 	if (rq->cfs.h_nr_running != 1)
9885 		return false;
9886 
9887 	return check_cpu_capacity(rq, sd);
9888 }
9889 
9890 /**
9891  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
9892  * @env: The load balancing environment.
9893  * @sds: Load-balancing data with statistics of the local group.
9894  * @group: sched_group whose statistics are to be updated.
9895  * @sgs: variable to hold the statistics for this group.
9896  * @sg_status: Holds flag indicating the status of the sched_group
9897  */
9898 static inline void update_sg_lb_stats(struct lb_env *env,
9899 				      struct sd_lb_stats *sds,
9900 				      struct sched_group *group,
9901 				      struct sg_lb_stats *sgs,
9902 				      int *sg_status)
9903 {
9904 	int i, nr_running, local_group;
9905 
9906 	memset(sgs, 0, sizeof(*sgs));
9907 
9908 	local_group = group == sds->local;
9909 
9910 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
9911 		struct rq *rq = cpu_rq(i);
9912 		unsigned long load = cpu_load(rq);
9913 
9914 		sgs->group_load += load;
9915 		sgs->group_util += cpu_util_cfs(i);
9916 		sgs->group_runnable += cpu_runnable(rq);
9917 		sgs->sum_h_nr_running += rq->cfs.h_nr_running;
9918 
9919 		nr_running = rq->nr_running;
9920 		sgs->sum_nr_running += nr_running;
9921 
9922 		if (nr_running > 1)
9923 			*sg_status |= SG_OVERLOAD;
9924 
9925 		if (cpu_overutilized(i))
9926 			*sg_status |= SG_OVERUTILIZED;
9927 
9928 #ifdef CONFIG_NUMA_BALANCING
9929 		sgs->nr_numa_running += rq->nr_numa_running;
9930 		sgs->nr_preferred_running += rq->nr_preferred_running;
9931 #endif
9932 		/*
9933 		 * No need to call idle_cpu() if nr_running is not 0
9934 		 */
9935 		if (!nr_running && idle_cpu(i)) {
9936 			sgs->idle_cpus++;
9937 			/* Idle cpu can't have misfit task */
9938 			continue;
9939 		}
9940 
9941 		if (local_group)
9942 			continue;
9943 
9944 		if (env->sd->flags & SD_ASYM_CPUCAPACITY) {
9945 			/* Check for a misfit task on the cpu */
9946 			if (sgs->group_misfit_task_load < rq->misfit_task_load) {
9947 				sgs->group_misfit_task_load = rq->misfit_task_load;
9948 				sgs->group_misfit_reason = rq->misfit_reason;
9949 				*sg_status |= SG_OVERLOAD;
9950 			}
9951 		} else if ((env->idle != CPU_NOT_IDLE) &&
9952 			   sched_reduced_capacity(rq, env->sd)) {
9953 			/* Check for a task running on a CPU with reduced capacity */
9954 			if (sgs->group_misfit_task_load < load)
9955 				sgs->group_misfit_task_load = load;
9956 		}
9957 	}
9958 
9959 	sgs->group_capacity = group->sgc->capacity;
9960 
9961 	sgs->group_weight = group->group_weight;
9962 
9963 	/* Check if dst CPU is idle and preferred to this group */
9964 	if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
9965 	    env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
9966 	    sched_asym(env, sds, sgs, group)) {
9967 		sgs->group_asym_packing = 1;
9968 	}
9969 
9970 	/* Check for loaded SMT group to be balanced to dst CPU */
9971 	if (!local_group && smt_balance(env, sgs, group))
9972 		sgs->group_smt_balance = 1;
9973 
9974 	sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
9975 
9976 	/* Computing avg_load makes sense only when group is overloaded */
9977 	if (sgs->group_type == group_overloaded)
9978 		sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
9979 				sgs->group_capacity;
9980 }
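/*
 * Example of the avg_load scaling above (plain arithmetic): a group with
 * group_load = 2048 and group_capacity = 1024 gets avg_load = 2048 * 1024 /
 * 1024 = 2048, while the same load over group_capacity = 2048 yields 1024.
 * Scaling by SCHED_CAPACITY_SCALE / capacity lets groups of different sizes
 * and CPU capacities be compared on an equal footing.
 */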
9981 
9982 /**
9983  * update_sd_pick_busiest - return 1 on busiest group
9984  * @env: The load balancing environment.
9985  * @sds: sched_domain statistics
9986  * @sg: sched_group candidate to be checked for being the busiest
9987  * @sgs: sched_group statistics
9988  *
9989  * Determine if @sg is a busier group than the previously selected
9990  * busiest group.
9991  *
9992  * Return: %true if @sg is a busier group than the previously selected
9993  * busiest group. %false otherwise.
9994  */
9995 static bool update_sd_pick_busiest(struct lb_env *env,
9996 				   struct sd_lb_stats *sds,
9997 				   struct sched_group *sg,
9998 				   struct sg_lb_stats *sgs)
9999 {
10000 	struct sg_lb_stats *busiest = &sds->busiest_stat;
10001 
10002 	/* Make sure that there is at least one task to pull */
10003 	if (!sgs->sum_h_nr_running)
10004 		return false;
10005 
10006 	/*
10007 	 * Don't try to pull misfit tasks we can't help.
10008 	 * We can use max_capacity here as reduction in capacity on some
10009 	 * CPUs in the group should either be possible to resolve
10010 	 * internally or be covered by avg_load imbalance (eventually).
10011 	 */
10012 	if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
10013 	    (sgs->group_type == group_misfit_task) &&
10014 	    (sgs->group_misfit_reason == MISFIT_PERF) &&
10015 	    (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) ||
10016 	     sds->local_stat.group_type != group_has_spare))
10017 		return false;
10018 
10019 	if (sgs->group_type > busiest->group_type)
10020 		return true;
10021 
10022 	if (sgs->group_type < busiest->group_type)
10023 		return false;
10024 
10025 	/*
10026 	 * The candidate and the current busiest group are the same type of
10027 	 * group. Let's check which one is the busiest according to the type.
10028 	 */
10029 
10030 	switch (sgs->group_type) {
10031 	case group_overloaded:
10032 		/* Select the overloaded group with highest avg_load. */
10033 		if (sgs->avg_load <= busiest->avg_load)
10034 			return false;
10035 		break;
10036 
10037 	case group_imbalanced:
10038 		/*
10039 		 * Select the 1st imbalanced group as we don't have any way to
10040 		 * choose one more than another.
10041 		 */
10042 		return false;
10043 
10044 	case group_asym_packing:
10045 		/* Prefer to move work away from the lowest priority CPU */
10046 		if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))
10047 			return false;
10048 		break;
10049 
10050 	case group_misfit_task:
10051 		/*
10052 		 * If we have more than one misfit sg go with the biggest
10053 		 * misfit.
10054 		 */
10055 		if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
10056 			return false;
10057 		break;
10058 
10059 	case group_smt_balance:
10060 		/*
10061 		 * Check if we have spare CPUs on either SMT group to
10062 		 * decide between has-spare and fully-busy handling.
10063 		 */
10064 		if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0)
10065 			goto has_spare;
10066 
10067 		fallthrough;
10068 
10069 	case group_fully_busy:
10070 		/*
10071 		 * Select the fully busy group with highest avg_load. In
10072 		 * theory, there is no need to pull task from such kind of
10073 		 * group because tasks have all the compute capacity they need
10074 		 * but we can still improve the overall throughput by reducing
10075 		 * contention when accessing shared HW resources.
10076 		 *
10077 		 * XXX for now avg_load is not computed and always 0 so we
10078 		 * select the 1st one, except if @sg is composed of SMT
10079 		 * siblings.
10080 		 */
10081 
10082 		if (sgs->avg_load < busiest->avg_load)
10083 			return false;
10084 
10085 		if (sgs->avg_load == busiest->avg_load) {
10086 			/*
10087 			 * SMT sched groups need more help than non-SMT groups.
10088 			 * If @sg happens to also be SMT, either choice is good.
10089 			 */
10090 			if (sds->busiest->flags & SD_SHARE_CPUCAPACITY)
10091 				return false;
10092 		}
10093 
10094 		break;
10095 
10096 	case group_has_spare:
10097 		/*
10098 		 * Do not pick sg with SMT CPUs over sg with pure CPUs,
10099 		 * as we do not want to pull a task off an SMT core with one
10100 		 * task and make the core idle.
10101 		 */
10102 		if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
10103 			if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
10104 				return false;
10105 			else
10106 				return true;
10107 		}
10108 has_spare:
10109 
10110 		/*
10111 		 * Select the non-overloaded group with the lowest number of
10112 		 * idle CPUs and highest number of running tasks. We could also
10113 		 * compare the spare capacity, which is more stable, but a group
10114 		 * can end up with less spare capacity yet more idle CPUs, which
10115 		 * means fewer opportunities to pull tasks.
10116 		 */
10117 		if (sgs->idle_cpus > busiest->idle_cpus)
10118 			return false;
10119 		else if ((sgs->idle_cpus == busiest->idle_cpus) &&
10120 			 (sgs->sum_nr_running <= busiest->sum_nr_running))
10121 			return false;
10122 
10123 		break;
10124 	}
10125 
10126 	/*
10127 	 * Candidate sg has no more than one task per CPU and has higher
10128 	 * per-CPU capacity. Migrating tasks to less capable CPUs may harm
10129 	 * throughput. Maximize throughput, power/energy consequences are not
10130 	 * considered.
10131 	 */
10132 	if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
10133 	    (sgs->group_type <= group_fully_busy) &&
10134 	    (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu))))
10135 		return false;
10136 
10137 	return true;
10138 }
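/*
 * Note: the group_type comparisons above rely on the enum being ordered by
 * increasing balancing urgency (group_has_spare < group_fully_busy < ... <
 * group_overloaded, matching the rows of the decision matrix further
 * below), so a plain '>' picks the needier classification first.
 */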
10139 
10140 #ifdef CONFIG_NUMA_BALANCING
10141 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
10142 {
10143 	if (sgs->sum_h_nr_running > sgs->nr_numa_running)
10144 		return regular;
10145 	if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
10146 		return remote;
10147 	return all;
10148 }
10149 
10150 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
10151 {
10152 	if (rq->nr_running > rq->nr_numa_running)
10153 		return regular;
10154 	if (rq->nr_running > rq->nr_preferred_running)
10155 		return remote;
10156 	return all;
10157 }
10158 #else
10159 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
10160 {
10161 	return all;
10162 }
10163 
10164 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
10165 {
10166 	return regular;
10167 }
10168 #endif /* CONFIG_NUMA_BALANCING */
10169 
10170 
10171 struct sg_lb_stats;
10172 
10173 /*
10174  * task_running_on_cpu - return 1 if @p is running on @cpu.
10175  */
10176 
10177 static unsigned int task_running_on_cpu(int cpu, struct task_struct *p)
10178 {
10179 	/* Task has no contribution or is new */
10180 	if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
10181 		return 0;
10182 
10183 	if (task_on_rq_queued(p))
10184 		return 1;
10185 
10186 	return 0;
10187 }
10188 
10189 /**
10190  * idle_cpu_without - would a given CPU be idle without p ?
10191  * @cpu: the processor on which idleness is tested.
10192  * @p: task which should be ignored.
10193  *
10194  * Return: 1 if the CPU would be idle. 0 otherwise.
10195  */
10196 static int idle_cpu_without(int cpu, struct task_struct *p)
10197 {
10198 	struct rq *rq = cpu_rq(cpu);
10199 
10200 	if (rq->curr != rq->idle && rq->curr != p)
10201 		return 0;
10202 
10203 	/*
10204 	 * rq->nr_running can't be used but an updated version without the
10205 	 * impact of p on cpu must be used instead. The updated nr_running
10206 	 * must be computed and tested before calling idle_cpu_without().
10207 	 */
10208 
10209 #ifdef CONFIG_SMP
10210 	if (rq->ttwu_pending)
10211 		return 0;
10212 #endif
10213 
10214 	return 1;
10215 }
10216 
10217 /*
10218  * update_sg_wakeup_stats - Update sched_group's statistics for wakeup.
10219  * @sd: The sched_domain level to look for idlest group.
10220  * @group: sched_group whose statistics are to be updated.
10221  * @sgs: variable to hold the statistics for this group.
10222  * @p: The task for which we look for the idlest group/CPU.
10223  */
10224 static inline void update_sg_wakeup_stats(struct sched_domain *sd,
10225 					  struct sched_group *group,
10226 					  struct sg_lb_stats *sgs,
10227 					  struct task_struct *p)
10228 {
10229 	int i, nr_running;
10230 
10231 	memset(sgs, 0, sizeof(*sgs));
10232 
10233 	/* Assume that task can't fit any CPU of the group */
10234 	if (sd->flags & SD_ASYM_CPUCAPACITY)
10235 		sgs->group_misfit_task_load = 1;
10236 
10237 	for_each_cpu(i, sched_group_span(group)) {
10238 		struct rq *rq = cpu_rq(i);
10239 		misfit_reason_t reason;
10240 		unsigned int local;
10241 
10242 		sgs->group_load += cpu_load_without(rq, p);
10243 		sgs->group_util += cpu_util_without(i, p);
10244 		sgs->group_runnable += cpu_runnable_without(rq, p);
10245 		local = task_running_on_cpu(i, p);
10246 		sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
10247 
10248 		nr_running = rq->nr_running - local;
10249 		sgs->sum_nr_running += nr_running;
10250 
10251 		/*
10252 		 * No need to call idle_cpu_without() if nr_running is not 0
10253 		 */
10254 		if (!nr_running && idle_cpu_without(i, p))
10255 			sgs->idle_cpus++;
10256 
10257 		/* Check if task fits in the CPU */
10258 		if (sd->flags & SD_ASYM_CPUCAPACITY &&
10259 		    sgs->group_misfit_task_load) {
10260 			if (!is_misfit_task(p, rq, &reason)) {
10261 				sgs->group_misfit_task_load = 0;
10262 				sgs->group_misfit_reason = -1;
10263 			} else {
10264 				sgs->group_misfit_task_load =
10265 					max_t(unsigned long, task_h_load(p), 1);
10266 				sgs->group_misfit_reason = reason;
10267 			}
10268 		}
10269 
10270 	}
10271 
10272 	sgs->group_capacity = group->sgc->capacity;
10273 
10274 	sgs->group_weight = group->group_weight;
10275 
10276 	sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
10277 
10278 	/*
10279 	 * Computing avg_load makes sense only when group is fully busy or
10280 	 * overloaded
10281 	 */
10282 	if (sgs->group_type == group_fully_busy ||
10283 		sgs->group_type == group_overloaded)
10284 		sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
10285 				sgs->group_capacity;
10286 }
10287 
10288 static bool update_pick_idlest(struct sched_group *idlest,
10289 			       struct sg_lb_stats *idlest_sgs,
10290 			       struct sched_group *group,
10291 			       struct sg_lb_stats *sgs)
10292 {
10293 	if (sgs->group_type < idlest_sgs->group_type)
10294 		return true;
10295 
10296 	if (sgs->group_type > idlest_sgs->group_type)
10297 		return false;
10298 
10299 	/*
10300 	 * The candidate and the current idlest group are the same type of
10301 	 * group. Let's check which one is the idlest according to the type.
10302 	 */
10303 
10304 	switch (sgs->group_type) {
10305 	case group_overloaded:
10306 	case group_fully_busy:
10307 		/* Select the group with lowest avg_load. */
10308 		if (idlest_sgs->avg_load <= sgs->avg_load)
10309 			return false;
10310 		break;
10311 
10312 	case group_imbalanced:
10313 	case group_asym_packing:
10314 	case group_smt_balance:
10315 		/* Those types are not used in the slow wakeup path */
10316 		return false;
10317 
10318 	case group_misfit_task:
10319 		/* Select group with the highest max capacity */
10320 		if (idlest->sgc->max_capacity >= group->sgc->max_capacity)
10321 			return false;
10322 		break;
10323 
10324 	case group_has_spare:
10325 		/* Select group with most idle CPUs */
10326 		if (idlest_sgs->idle_cpus > sgs->idle_cpus)
10327 			return false;
10328 
10329 		/* Select group with lowest group_util */
10330 		if (idlest_sgs->idle_cpus == sgs->idle_cpus &&
10331 			idlest_sgs->group_util <= sgs->group_util)
10332 			return false;
10333 
10334 		break;
10335 	}
10336 
10337 	return true;
10338 }
10339 
10340 /*
10341  * find_idlest_group() finds and returns the least busy CPU group within the
10342  * domain.
10343  *
10344  * Assumes p is allowed on at least one CPU in sd.
10345  */
10346 static struct sched_group *
10347 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
10348 {
10349 	struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups;
10350 	struct sg_lb_stats local_sgs, tmp_sgs;
10351 	struct sg_lb_stats *sgs;
10352 	unsigned long imbalance;
10353 	struct sg_lb_stats idlest_sgs = {
10354 			.avg_load = UINT_MAX,
10355 			.group_type = group_overloaded,
10356 	};
10357 
10358 	do {
10359 		int local_group;
10360 
10361 		/* Skip over this group if it has no CPUs allowed */
10362 		if (!cpumask_intersects(sched_group_span(group),
10363 					p->cpus_ptr))
10364 			continue;
10365 
10366 		/* Skip over this group if no cookie matched */
10367 		if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group))
10368 			continue;
10369 
10370 		local_group = cpumask_test_cpu(this_cpu,
10371 					       sched_group_span(group));
10372 
10373 		if (local_group) {
10374 			sgs = &local_sgs;
10375 			local = group;
10376 		} else {
10377 			sgs = &tmp_sgs;
10378 		}
10379 
10380 		update_sg_wakeup_stats(sd, group, sgs, p);
10381 
10382 		if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
10383 			idlest = group;
10384 			idlest_sgs = *sgs;
10385 		}
10386 
10387 	} while (group = group->next, group != sd->groups);
10388 
10389 
10390 	/* There is no idlest group to push tasks to */
10391 	if (!idlest)
10392 		return NULL;
10393 
10394 	/* The local group has been skipped because of CPU affinity */
10395 	if (!local)
10396 		return idlest;
10397 
10398 	/*
10399 	 * If the local group is idler than the selected idlest group
10400 	 * don't try and push the task.
10401 	 */
10402 	if (local_sgs.group_type < idlest_sgs.group_type)
10403 		return NULL;
10404 
10405 	/*
10406 	 * If the local group is busier than the selected idlest group
10407 	 * try and push the task.
10408 	 */
10409 	if (local_sgs.group_type > idlest_sgs.group_type)
10410 		return idlest;
10411 
10412 	switch (local_sgs.group_type) {
10413 	case group_overloaded:
10414 	case group_fully_busy:
10415 
10416 		/* Calculate allowed imbalance based on load */
10417 		imbalance = scale_load_down(NICE_0_LOAD) *
10418 				(sd->imbalance_pct-100) / 100;
10419 
10420 		/*
10421 		 * When comparing groups across NUMA domains, it's possible for
10422 		 * the local domain to be very lightly loaded relative to the
10423 		 * remote domains but "imbalance" skews the comparison making
10424 		 * remote CPUs look much more favourable. When considering
10425 		 * cross-domain, add imbalance to the load on the remote node
10426 		 * and consider staying local.
10427 		 */
10428 
10429 		if ((sd->flags & SD_NUMA) &&
10430 		    ((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load))
10431 			return NULL;
10432 
10433 		/*
10434 		 * If the local group is less loaded than the selected
10435 		 * idlest group don't try and push any tasks.
10436 		 */
10437 		if (idlest_sgs.avg_load >= (local_sgs.avg_load + imbalance))
10438 			return NULL;
10439 
10440 		if (100 * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load)
10441 			return NULL;
10442 		break;
10443 
10444 	case group_imbalanced:
10445 	case group_asym_packing:
10446 	case group_smt_balance:
10447 		/* Those types are not used in the slow wakeup path */
10448 		return NULL;
10449 
10450 	case group_misfit_task:
10451 		/* Select group with the highest max capacity */
10452 		if (local->sgc->max_capacity >= idlest->sgc->max_capacity)
10453 			return NULL;
10454 		break;
10455 
10456 	case group_has_spare:
10457 #ifdef CONFIG_NUMA
10458 		if (sd->flags & SD_NUMA) {
10459 			int imb_numa_nr = sd->imb_numa_nr;
10460 #ifdef CONFIG_NUMA_BALANCING
10461 			int idlest_cpu;
10462 			/*
10463 			 * If there is spare capacity at NUMA, try to select
10464 			 * the preferred node
10465 			 */
10466 			if (cpu_to_node(this_cpu) == p->numa_preferred_nid)
10467 				return NULL;
10468 
10469 			idlest_cpu = cpumask_first(sched_group_span(idlest));
10470 			if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid)
10471 				return idlest;
10472 #endif /* CONFIG_NUMA_BALANCING */
10473 			/*
10474 			 * Otherwise, keep the task close to the wakeup source
10475 			 * and improve locality if the number of running tasks
10476 			 * would remain below the threshold where an imbalance is
10477 			 * allowed while accounting for the possibility the
10478 			 * task is pinned to a subset of CPUs. If there is a
10479 			 * real need of migration, periodic load balance will
10480 			 * take care of it.
10481 			 */
10482 			if (p->nr_cpus_allowed != NR_CPUS) {
10483 				struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
10484 
10485 				cpumask_and(cpus, sched_group_span(local), p->cpus_ptr);
10486 				imb_numa_nr = min(cpumask_weight(cpus), sd->imb_numa_nr);
10487 			}
10488 
10489 			imbalance = abs(local_sgs.idle_cpus - idlest_sgs.idle_cpus);
10490 			if (!adjust_numa_imbalance(imbalance,
10491 						   local_sgs.sum_nr_running + 1,
10492 						   imb_numa_nr)) {
10493 				return NULL;
10494 			}
10495 		}
10496 #endif /* CONFIG_NUMA */
10497 
10498 		/*
10499 		 * Select the group with the highest number of idle CPUs. We
10500 		 * could also compare the utilization, which is more stable, but
10501 		 * a group can end up with less spare capacity yet more idle
10502 		 * CPUs, which means more opportunity to run the task.
10503 		 */
10504 		if (local_sgs.idle_cpus >= idlest_sgs.idle_cpus)
10505 			return NULL;
10506 		break;
10507 	}
10508 
10509 	return idlest;
10510 }
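/*
 * Example of the allowed imbalance computed in the fully_busy/overloaded
 * case above (assumed values): with scale_load_down(NICE_0_LOAD) = 1024 and
 * sd->imbalance_pct = 117, imbalance = 1024 * (117 - 100) / 100 = 174.  On
 * a NUMA domain the task then stays local unless
 * idlest_sgs.avg_load + 174 < local_sgs.avg_load.
 */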
10511 
10512 static void update_idle_cpu_scan(struct lb_env *env,
10513 				 unsigned long sum_util)
10514 {
10515 	struct sched_domain_shared *sd_share;
10516 	int llc_weight, pct;
10517 	u64 x, y, tmp;
10518 	/*
10519 	 * Update the number of CPUs to scan in LLC domain, which could
10520 	 * be used as a hint in select_idle_cpu(). The update of sd_share
10521 	 * could be expensive because it is within a shared cache line.
10522 	 * So the write of this hint only occurs during periodic load
10523 	 * balancing, rather than CPU_NEWLY_IDLE, because the latter
10524 	 * can fire way more frequently than the former.
10525 	 */
10526 	if (!sched_feat(SIS_UTIL) || env->idle == CPU_NEWLY_IDLE)
10527 		return;
10528 
10529 	llc_weight = per_cpu(sd_llc_size, env->dst_cpu);
10530 	if (env->sd->span_weight != llc_weight)
10531 		return;
10532 
10533 	sd_share = rcu_dereference(per_cpu(sd_llc_shared, env->dst_cpu));
10534 	if (!sd_share)
10535 		return;
10536 
10537 	/*
10538 	 * The number of CPUs to search drops as sum_util increases; when
10539 	 * sum_util hits 85% or above, the scan stops.
10540 	 * 85% was chosen as the threshold because it corresponds to the
10541 	 * imbalance_pct (117) when an LLC sched group is overloaded.
10542 	 *
10543 	 * let y = SCHED_CAPACITY_SCALE - p * x^2                       [1]
10544 	 * and y'= y / SCHED_CAPACITY_SCALE
10545 	 *
10546 	 * x is the ratio of sum_util compared to the CPU capacity:
10547 	 * x = sum_util / (llc_weight * SCHED_CAPACITY_SCALE)
10548 	 * y' is the ratio of CPUs to be scanned in the LLC domain,
10549 	 * and the number of CPUs to scan is calculated by:
10550 	 *
10551 	 * nr_scan = llc_weight * y'                                    [2]
10552 	 *
10553 	 * When x hits the overloaded threshold, i.e. when
10554 	 * x = 100 / pct, y drops to 0. According to [1],
10555 	 * p should be SCHED_CAPACITY_SCALE * pct^2 / 10000
10556 	 *
10557 	 * Scale x by SCHED_CAPACITY_SCALE:
10558 	 * x' = sum_util / llc_weight;                                  [3]
10559 	 *
10560 	 * and finally [1] becomes:
10561 	 * y = SCHED_CAPACITY_SCALE -
10562 	 *     x'^2 * pct^2 / (10000 * SCHED_CAPACITY_SCALE)            [4]
10563 	 *
10564 	 */
10565 	/* equation [3] */
10566 	x = sum_util;
10567 	do_div(x, llc_weight);
10568 
10569 	/* equation [4] */
10570 	pct = env->sd->imbalance_pct;
10571 	tmp = x * x * pct * pct;
10572 	do_div(tmp, 10000 * SCHED_CAPACITY_SCALE);
10573 	tmp = min_t(long, tmp, SCHED_CAPACITY_SCALE);
10574 	y = SCHED_CAPACITY_SCALE - tmp;
10575 
10576 	/* equation [2] */
10577 	y *= llc_weight;
10578 	do_div(y, SCHED_CAPACITY_SCALE);
10579 	if ((int)y != sd_share->nr_idle_scan)
10580 		WRITE_ONCE(sd_share->nr_idle_scan, (int)y);
10581 }
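/*
 * Numeric walk-through of the equations above (assumed values): with
 * llc_weight = 16 CPUs, imbalance_pct = 117 and sum_util = 8192 (the LLC
 * is half busy):
 *
 *   x'      = 8192 / 16                        = 512        [3]
 *   tmp     = 512^2 * 117^2 / (10000 * 1024)   = 350 (truncated)
 *   y       = 1024 - 350                       = 674        [4]
 *   nr_scan = 674 * 16 / 1024                  = 10         [2]
 *
 * so select_idle_cpu() would scan at most 10 of the 16 CPUs.  An idle LLC
 * (sum_util = 0) keeps nr_scan at 16, and sum_util at roughly 85% of
 * capacity or above drives it to 0.
 */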
10582 
10583 /**
10584  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
10585  * @env: The load balancing environment.
10586  * @sds: variable to hold the statistics for this sched_domain.
10587  */
10588 
10589 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
10590 {
10591 	struct sched_group *sg = env->sd->groups;
10592 	struct sg_lb_stats *local = &sds->local_stat;
10593 	struct sg_lb_stats tmp_sgs;
10594 	unsigned long sum_util = 0;
10595 	int sg_status = 0;
10596 
10597 	do {
10598 		struct sg_lb_stats *sgs = &tmp_sgs;
10599 		int local_group;
10600 
10601 		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
10602 		if (local_group) {
10603 			sds->local = sg;
10604 			sgs = local;
10605 
10606 			if (env->idle != CPU_NEWLY_IDLE ||
10607 			    time_after_eq(jiffies, sg->sgc->next_update))
10608 				update_group_capacity(env->sd, env->dst_cpu);
10609 		}
10610 
10611 		update_sg_lb_stats(env, sds, sg, sgs, &sg_status);
10612 
10613 		if (local_group)
10614 			goto next_group;
10615 
10616 
10617 		if (update_sd_pick_busiest(env, sds, sg, sgs)) {
10618 			sds->busiest = sg;
10619 			sds->busiest_stat = *sgs;
10620 		}
10621 
10622 next_group:
10623 		/* Now, start updating sd_lb_stats */
10624 		sds->total_load += sgs->group_load;
10625 		sds->total_capacity += sgs->group_capacity;
10626 
10627 		sum_util += sgs->group_util;
10628 		sg = sg->next;
10629 	} while (sg != env->sd->groups);
10630 
10631 	/*
10632 	 * Indicate that the child domain of the busiest group prefers tasks
10633 	 * to go to a sibling domain first. NB the flags of a sched group
10634 	 * are those of the child domain.
10635 	 */
10636 	if (sds->busiest)
10637 		sds->prefer_sibling = !!(sds->busiest->flags & SD_PREFER_SIBLING);
10638 
10639 
10640 	if (env->sd->flags & SD_NUMA)
10641 		env->fbq_type = fbq_classify_group(&sds->busiest_stat);
10642 
10643 	if (!env->sd->parent) {
10644 		/* update overload indicator if we are at root domain */
10645 		WRITE_ONCE(env->dst_rq->rd->overload, sg_status & SG_OVERLOAD);
10646 
10647 		/* Update over-utilization (tipping point, U >= 0) indicator */
10648 		set_rd_overutilized_status(env->dst_rq->rd,
10649 					   sg_status & SG_OVERUTILIZED);
10650 	} else if (sg_status & SG_OVERUTILIZED) {
10651 		set_rd_overutilized_status(env->dst_rq->rd, SG_OVERUTILIZED);
10652 	}
10653 
10654 	update_idle_cpu_scan(env, sum_util);
10655 }
10656 
10657 /**
10658  * calculate_imbalance - Calculate the amount of imbalance present within the
10659  *			 groups of a given sched_domain during load balance.
10660  * @env: load balance environment
10661  * @sds: statistics of the sched_domain whose imbalance is to be calculated.
10662  */
10663 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
10664 {
10665 	struct sg_lb_stats *local, *busiest;
10666 
10667 	local = &sds->local_stat;
10668 	busiest = &sds->busiest_stat;
10669 
10670 	if (busiest->group_type == group_misfit_task) {
10671 		if (env->sd->flags & SD_ASYM_CPUCAPACITY) {
10672 			/* Set imbalance to allow misfit tasks to be balanced. */
10673 			env->migration_type = migrate_misfit;
10674 			env->imbalance = 1;
10675 		} else {
10676 			/*
10677 			 * Set load imbalance to allow moving task from cpu
10678 			 * with reduced capacity.
10679 			 */
10680 			env->migration_type = migrate_load;
10681 			env->imbalance = busiest->group_misfit_task_load;
10682 		}
10683 		return;
10684 	}
10685 
10686 	if (busiest->group_type == group_asym_packing) {
10687 		/*
10688 		 * In case of asym capacity, we will try to migrate all load to
10689 		 * the preferred CPU.
10690 		 */
10691 		env->migration_type = migrate_task;
10692 		env->imbalance = busiest->sum_h_nr_running;
10693 		return;
10694 	}
10695 
10696 	if (busiest->group_type == group_smt_balance) {
10697 		/* Reduce number of tasks sharing CPU capacity */
10698 		env->migration_type = migrate_task;
10699 		env->imbalance = 1;
10700 		return;
10701 	}
10702 
10703 	if (busiest->group_type == group_imbalanced) {
10704 		/*
10705 		 * In the group_imb case we cannot rely on group-wide averages
10706 		 * to ensure CPU-load equilibrium, try to move any task to fix
10707 		 * the imbalance. The next load balance will take care of
10708 		 * balancing back the system.
10709 		 */
10710 		env->migration_type = migrate_task;
10711 		env->imbalance = 1;
10712 		return;
10713 	}
10714 
10715 	/*
10716 	 * Try to use spare capacity of local group without overloading it or
10717 	 * emptying busiest.
10718 	 */
10719 	if (local->group_type == group_has_spare) {
10720 		if ((busiest->group_type > group_fully_busy) &&
10721 		    !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) {
10722 			/*
10723 			 * If busiest is overloaded, try to fill spare
10724 			 * capacity. This might end up creating spare capacity
10725 			 * in busiest or busiest still being overloaded but
10726 			 * there is no simple way to directly compute the
10727 			 * amount of load to migrate in order to balance the
10728 			 * system.
10729 			 */
10730 			env->migration_type = migrate_util;
10731 			env->imbalance = max(local->group_capacity, local->group_util) -
10732 					 local->group_util;
10733 
10734 			/*
10735 			 * In some cases, the group's utilization is max or even
10736 			 * higher than capacity because of migrations but the
10737 			 * local CPU is (newly) idle. There is at least one
10738 			 * waiting task in this overloaded busiest group. Let's
10739 			 * try to pull it.
10740 			 */
10741 			if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) {
10742 				env->migration_type = migrate_task;
10743 				env->imbalance = 1;
10744 			}
10745 
10746 			return;
10747 		}
10748 
10749 		if (busiest->group_weight == 1 || sds->prefer_sibling) {
10750 			/*
10751 			 * When prefer_sibling is set, spread running tasks
10752 			 * evenly across groups.
10753 			 */
10754 			env->migration_type = migrate_task;
10755 			env->imbalance = sibling_imbalance(env, sds, busiest, local);
10756 		} else {
10757 
10758 			/*
10759 			 * If there is no overload, we just want to even the number of
10760 			 * idle cpus.
10761 			 */
10762 			env->migration_type = migrate_task;
10763 			env->imbalance = max_t(long, 0,
10764 					       (local->idle_cpus - busiest->idle_cpus));
10765 		}
10766 
10767 #ifdef CONFIG_NUMA
10768 		/* Consider allowing a small imbalance between NUMA groups */
10769 		if (env->sd->flags & SD_NUMA) {
10770 			env->imbalance = adjust_numa_imbalance(env->imbalance,
10771 							       local->sum_nr_running + 1,
10772 							       env->sd->imb_numa_nr);
10773 		}
10774 #endif
10775 
10776 		/* Number of tasks to move to restore balance */
10777 		env->imbalance >>= 1;
10778 
10779 		return;
10780 	}
10781 
10782 	/*
10783 	 * Local is fully busy but has to take more load to relieve the
10784 	 * busiest group
10785 	 */
10786 	if (local->group_type < group_overloaded) {
10787 		/*
10788 		 * Local will become overloaded so the avg_load metrics are
10789 		 * finally needed.
10790 		 */
10791 
10792 		local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) /
10793 				  local->group_capacity;
10794 
10795 		/*
10796 		 * If the local group is more loaded than the selected
10797 		 * busiest group don't try to pull any tasks.
10798 		 */
10799 		if (local->avg_load >= busiest->avg_load) {
10800 			env->imbalance = 0;
10801 			return;
10802 		}
10803 
10804 		sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
10805 				sds->total_capacity;
10806 
10807 		/*
10808 		 * If the local group is more loaded than the average system
10809 		 * load, don't try to pull any tasks.
10810 		 */
10811 		if (local->avg_load >= sds->avg_load) {
10812 			env->imbalance = 0;
10813 			return;
10814 		}
10815 
10816 	}
10817 
10818 	/*
10819 	 * Both group are or will become overloaded and we're trying to get all
10820 	 * the CPUs to the average_load, so we don't want to push ourselves
10821 	 * above the average load, nor do we wish to reduce the max loaded CPU
10822 	 * below the average load. At the same time, we also don't want to
10823 	 * reduce the group load below the group capacity. Thus we look for
10824 	 * the minimum possible imbalance.
10825 	 */
10826 	env->migration_type = migrate_load;
10827 	env->imbalance = min(
10828 		(busiest->avg_load - sds->avg_load) * busiest->group_capacity,
10829 		(sds->avg_load - local->avg_load) * local->group_capacity
10830 	) / SCHED_CAPACITY_SCALE;
10831 }
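/*
 * Worked example for the final min() above (illustrative numbers): with
 * busiest->avg_load = 800, sds->avg_load = 600, local->avg_load = 400 and
 * both group capacities at 1024, the two candidates are (800 - 600) * 1024
 * and (600 - 400) * 1024; the minimum divided by SCHED_CAPACITY_SCALE gives
 * an imbalance of 200, just enough to bring either group to the domain
 * average without overshooting it.
 */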
10832 
10833 /******* find_busiest_group() helpers end here *********************/
10834 
10835 /*
10836  * Decision matrix according to the local and busiest group type:
10837  *
10838  * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded
10839  * has_spare        nr_idle   balanced   N/A    N/A  balanced   balanced
10840  * fully_busy       nr_idle   nr_idle    N/A    N/A  balanced   balanced
10841  * misfit_task      force     N/A        N/A    N/A  N/A        N/A
10842  * asym_packing     force     force      N/A    N/A  force      force
10843  * imbalanced       force     force      N/A    N/A  force      force
10844  * overloaded       force     force      N/A    N/A  force      avg_load
10845  *
10846  * N/A :      Not Applicable because already filtered while updating
10847  *            statistics.
10848  * balanced : The system is balanced for these 2 groups.
10849  * force :    Calculate the imbalance as load migration is probably needed.
10850  * avg_load : Only if imbalance is significant enough.
10851  * nr_idle :  dst_cpu is not busy and the number of idle CPUs is quite
10852  *            different in groups.
10853  */
10854 
10855 /**
10856  * find_busiest_group - Returns the busiest group within the sched_domain
10857  * if there is an imbalance.
10858  * @env: The load balancing environment.
10859  *
10860  * Also calculates the amount of runnable load which should be moved
10861  * to restore balance.
10862  *
10863  * Return:	- The busiest group if imbalance exists.
10864  */
10865 static struct sched_group *find_busiest_group(struct lb_env *env)
10866 {
10867 	struct sg_lb_stats *local, *busiest;
10868 	struct sd_lb_stats sds;
10869 
10870 	init_sd_lb_stats(&sds);
10871 
10872 	/*
10873 	 * Compute the various statistics relevant for load balancing at
10874 	 * this level.
10875 	 */
10876 	update_sd_lb_stats(env, &sds);
10877 
10878 	/* There is no busy sibling group to pull tasks from */
10879 	if (!sds.busiest)
10880 		goto out_balanced;
10881 
10882 	busiest = &sds.busiest_stat;
10883 
10884 	/* Misfit tasks should be dealt with regardless of the avg load */
10885 	if (busiest->group_type == group_misfit_task)
10886 		goto force_balance;
10887 
10888 	if (sched_energy_enabled()) {
10889 		struct root_domain *rd = env->dst_rq->rd;
10890 		int out_balance = 1;
10891 
10892 		trace_android_rvh_find_busiest_group(sds.busiest, env->dst_rq,
10893 					&out_balance);
10894 		if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)
10895 					&& out_balance)
10896 			goto out_balanced;
10897 	}
10898 
10899 	/* ASYM feature bypasses nice load balance check */
10900 	if (busiest->group_type == group_asym_packing)
10901 		goto force_balance;
10902 
10903 	/*
10904 	 * If the busiest group is imbalanced the below checks don't
10905 	 * work because they assume all things are equal, which typically
10906 	 * isn't true due to cpus_ptr constraints and the like.
10907 	 */
10908 	if (busiest->group_type == group_imbalanced)
10909 		goto force_balance;
10910 
10911 	local = &sds.local_stat;
10912 	/*
10913 	 * If the local group is busier than the selected busiest group
10914 	 * don't try and pull any tasks.
10915 	 */
10916 	if (local->group_type > busiest->group_type)
10917 		goto out_balanced;
10918 
10919 	/*
10920 	 * When groups are overloaded, use the avg_load to ensure fairness
10921 	 * between tasks.
10922 	 */
10923 	if (local->group_type == group_overloaded) {
10924 		/*
10925 		 * If the local group is more loaded than the selected
10926 		 * busiest group don't try to pull any tasks.
10927 		 */
10928 		if (local->avg_load >= busiest->avg_load)
10929 			goto out_balanced;
10930 
10931 		/* XXX broken for overlapping NUMA groups */
10932 		sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) /
10933 				sds.total_capacity;
10934 
10935 		/*
10936 		 * Don't pull any tasks if this group is already above the
10937 		 * domain average load.
10938 		 */
10939 		if (local->avg_load >= sds.avg_load)
10940 			goto out_balanced;
10941 
10942 		/*
10943 		 * If the busiest group is more loaded, use imbalance_pct to be
10944 		 * conservative.
10945 		 */
10946 		if (100 * busiest->avg_load <=
10947 				env->sd->imbalance_pct * local->avg_load)
10948 			goto out_balanced;
10949 	}
10950 
10951 	/*
10952 	 * Try to move all excess tasks to a sibling domain of the busiest
10953 	 * group's child domain.
10954 	 */
10955 	if (sds.prefer_sibling && local->group_type == group_has_spare &&
10956 	    sibling_imbalance(env, &sds, busiest, local) > 1)
10957 		goto force_balance;
10958 
10959 	if (busiest->group_type != group_overloaded) {
10960 		if (env->idle == CPU_NOT_IDLE) {
10961 			/*
10962 			 * If the busiest group is not overloaded (and as a
10963 			 * result the local one too) but this CPU is already
10964 			 * busy, let another idle CPU try to pull task.
10965 			 */
10966 			goto out_balanced;
10967 		}
10968 
10969 		if (busiest->group_type == group_smt_balance &&
10970 		    smt_vs_nonsmt_groups(sds.local, sds.busiest)) {
10971 			/* Let non SMT CPU pull from SMT CPU sharing with sibling */
10972 			goto force_balance;
10973 		}
10974 
10975 		if (busiest->group_weight > 1 &&
10976 		    local->idle_cpus <= (busiest->idle_cpus + 1)) {
10977 			/*
10978 			 * If the busiest group is not overloaded
10979 			 * and there is no imbalance between this and busiest
10980 			 * group wrt idle CPUs, it is balanced. The imbalance
10981 			 * becomes significant if the diff is greater than 1
10982 			 * otherwise we might end up just moving the imbalance
10983 			 * to another group. Of course this applies only if
10984 			 * there is more than 1 CPU per group.
10985 			 */
10986 			goto out_balanced;
10987 		}
10988 
10989 		if (busiest->sum_h_nr_running == 1) {
10990 			/*
10991 			 * busiest doesn't have any tasks waiting to run
10992 			 */
10993 			goto out_balanced;
10994 		}
10995 	}
10996 
10997 force_balance:
10998 	/* Looks like there is an imbalance. Compute it */
10999 	calculate_imbalance(env, &sds);
11000 	return env->imbalance ? sds.busiest : NULL;
11001 
11002 out_balanced:
11003 	env->imbalance = 0;
11004 	return NULL;
11005 }
11006 
11007 /*
11008  * find_busiest_queue - find the busiest runqueue among the CPUs in the group.
11009  */
11010 static struct rq *find_busiest_queue(struct lb_env *env,
11011 				     struct sched_group *group)
11012 {
11013 	struct rq *busiest = NULL, *rq;
11014 	unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1;
11015 	unsigned int busiest_nr = 0;
11016 	int i, done = 0;
11017 
11018 	trace_android_rvh_find_busiest_queue(env->dst_cpu, group, env->cpus,
11019 					     &busiest, &done);
11020 	if (done)
11021 		return busiest;
11022 
11023 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
11024 		unsigned long capacity, load, util;
11025 		unsigned int nr_running;
11026 		enum fbq_type rt;
11027 
11028 		rq = cpu_rq(i);
11029 		rt = fbq_classify_rq(rq);
11030 
11031 		/*
11032 		 * We classify groups/runqueues into three groups:
11033 		 *  - regular: there are !numa tasks
11034 		 *  - remote:  there are numa tasks that run on the 'wrong' node
11035 		 *  - all:     there is no distinction
11036 		 *
11037 		 * In order to avoid migrating ideally placed numa tasks,
11038 		 * ignore those when there are better options.
11039 		 *
11040 		 * If we ignore the actual busiest queue to migrate another
11041 		 * task, the next balance pass can still reduce the busiest
11042 		 * queue by moving tasks around inside the node.
11043 		 *
11044 		 * If we cannot move enough load due to this classification
11045 		 * the next pass will adjust the group classification and
11046 		 * allow migration of more tasks.
11047 		 *
11048 		 * Both cases only affect the total convergence complexity.
11049 		 */
11050 		if (rt > env->fbq_type)
11051 			continue;
11052 
11053 		nr_running = rq->cfs.h_nr_running;
11054 		if (!nr_running)
11055 			continue;
11056 
11057 		capacity = capacity_of(i);
11058 
11059 		/*
11060 		 * For ASYM_CPUCAPACITY domains, don't pick a CPU that could
11061 		 * eventually lead to active_balancing high->low capacity.
11062 		 * Higher per-CPU capacity is considered better than balancing
11063 		 * average load.
11064 		 */
11065 		if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
11066 		    rq->misfit_reason == MISFIT_PERF &&
11067 		    !capacity_greater(capacity_of(env->dst_cpu), capacity) &&
11068 		    nr_running == 1)
11069 			continue;
11070 
11071 		/*
11072 		 * Make sure we only pull tasks from a CPU of lower priority
11073 		 * when balancing between SMT siblings.
11074 		 *
11075 		 * If balancing between cores, let lower priority CPUs help
11076 		 * SMT cores with more than one busy sibling.
11077 		 */
11078 		if ((env->sd->flags & SD_ASYM_PACKING) &&
11079 		    sched_use_asym_prio(env->sd, i) &&
11080 		    sched_asym_prefer(i, env->dst_cpu) &&
11081 		    nr_running == 1)
11082 			continue;
11083 
11084 		switch (env->migration_type) {
11085 		case migrate_load:
11086 			/*
11087 			 * When comparing with load imbalance, use cpu_load()
11088 			 * which is not scaled with the CPU capacity.
11089 			 */
11090 			load = cpu_load(rq);
11091 
11092 			if (nr_running == 1 && load > env->imbalance &&
11093 			    !check_cpu_capacity(rq, env->sd))
11094 				break;
11095 
11096 			/*
11097 			 * For the load comparisons with the other CPUs,
11098 			 * consider the cpu_load() scaled with the CPU
11099 			 * capacity, so that the load can be moved away
11100 			 * from the CPU that is potentially running at a
11101 			 * lower capacity.
11102 			 *
11103 			 * Thus we're looking for max(load_i / capacity_i),
11104 			 * crosswise multiplication to rid ourselves of the
11105 			 * division works out to:
11106 			 * load_i * capacity_j > load_j * capacity_i;
11107 			 * where j is our previous maximum.
11108 			 */
11109 			if (load * busiest_capacity > busiest_load * capacity) {
11110 				busiest_load = load;
11111 				busiest_capacity = capacity;
11112 				busiest = rq;
11113 			}
11114 			break;
11115 
11116 		case migrate_util:
11117 			util = cpu_util_cfs_boost(i);
11118 
11119 			/*
11120 			 * Don't try to pull utilization from a CPU with one
11121 			 * running task. Whatever its utilization, we will fail
11122 			 * running task. Whatever its utilization, we will fail
11123 			 * to detach the task.
11124 			if (nr_running <= 1)
11125 				continue;
11126 
11127 			if (busiest_util < util) {
11128 				busiest_util = util;
11129 				busiest = rq;
11130 			}
11131 			break;
11132 
11133 		case migrate_task:
11134 			if (busiest_nr < nr_running) {
11135 				busiest_nr = nr_running;
11136 				busiest = rq;
11137 			}
11138 			break;
11139 
11140 		case migrate_misfit:
11141 			/*
11142 			 * For ASYM_CPUCAPACITY domains with misfit tasks we
11143 			 * simply seek the "biggest" misfit task.
11144 			 */
11145 			if (rq->misfit_task_load > busiest_load) {
11146 				busiest_load = rq->misfit_task_load;
11147 				busiest = rq;
11148 			}
11149 
11150 			break;
11151 
11152 		}
11153 	}
11154 
11155 	return busiest;
11156 }
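/*
 * Example of the migrate_load cross-multiplication above (illustrative
 * numbers): a CPU with load = 600 and capacity = 512 competes with the
 * current maximum of load = 800 on capacity = 1024.  Since 600 * 1024 =
 * 614400 > 800 * 512 = 409600, the smaller CPU is busier in relative terms
 * (600/512 > 800/1024) and becomes the new busiest runqueue.
 */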
11157 
11158 /*
11159  * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
11160  * any value works so long as it is large enough.
11161  */
11162 #define MAX_PINNED_INTERVAL	512
11163 
11164 static inline bool
11165 asym_active_balance(struct lb_env *env)
11166 {
11167 	/*
11168 	 * ASYM_PACKING needs to force migrate tasks from busy but lower
11169 	 * priority CPUs in order to pack all tasks in the highest priority
11170 	 * CPUs. When done between cores, do it only if the whole core is
11171 	 * idle.
11172 	 *
11173 	 * If @env::src_cpu is an SMT core with busy siblings, let
11174 	 * the lower priority @env::dst_cpu help it. Do not follow
11175 	 * CPU priority.
11176 	 */
11177 	return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
11178 	       sched_use_asym_prio(env->sd, env->dst_cpu) &&
11179 	       (sched_asym_prefer(env->dst_cpu, env->src_cpu) ||
11180 		!sched_use_asym_prio(env->sd, env->src_cpu));
11181 }
11182 
11183 static inline bool
11184 imbalanced_active_balance(struct lb_env *env)
11185 {
11186 	struct sched_domain *sd = env->sd;
11187 
11188 	/*
11189 	 * The imbalanced case covers pinned tasks preventing a fair
11190 	 * distribution of the load on the system, but also the need to spread
11191 	 * threads evenly on a system with spare capacity.
11192 	 */
11193 	if ((env->migration_type == migrate_task) &&
11194 	    (sd->nr_balance_failed > sd->cache_nice_tries+2))
11195 		return 1;
11196 
11197 	return 0;
11198 }
11199 
11200 static int need_active_balance(struct lb_env *env)
11201 {
11202 	struct sched_domain *sd = env->sd;
11203 
11204 	if (asym_active_balance(env))
11205 		return 1;
11206 
11207 	if (imbalanced_active_balance(env))
11208 		return 1;
11209 
11210 	/*
11211 	 * The dst_cpu is idle and the src CPU has only 1 CFS task.
11212 	 * It's worth migrating the task if the src_cpu's capacity is reduced
11213 	 * because of other sched_class or IRQs if more capacity stays
11214 	 * available on dst_cpu.
11215 	 */
11216 	if ((env->idle != CPU_NOT_IDLE) &&
11217 	    (env->src_rq->cfs.h_nr_running == 1)) {
11218 		if ((check_cpu_capacity(env->src_rq, sd)) &&
11219 		    (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
11220 			return 1;
11221 	}
11222 
11223 	if (env->migration_type == migrate_misfit)
11224 		return 1;
11225 
11226 	return 0;
11227 }
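/*
 * Example of the reduced-capacity test above (assumed values): if IRQ or RT
 * pressure leaves the source CPU with capacity_of() = 718 while dst_cpu has
 * 1024 and imbalance_pct = 117, the comparison is 718 * 117 = 84006 against
 * 1024 * 100 = 102400; since 84006 < 102400, actively migrating the lone
 * CFS task is considered worthwhile.
 */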
11228 
11229 static int active_load_balance_cpu_stop(void *data);
11230 
11231 static int should_we_balance(struct lb_env *env)
11232 {
11233 	struct cpumask *swb_cpus = this_cpu_cpumask_var_ptr(should_we_balance_tmpmask);
11234 	struct sched_group *sg = env->sd->groups;
11235 	int cpu, idle_smt = -1;
11236 
11237 	/*
11238 	 * Ensure the balancing environment is consistent; inconsistency can
11239 	 * happen when the softirq triggers 'during' hotplug.
11240 	 */
11241 	if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
11242 		return 0;
11243 
11244 	/*
11245 	 * In the newly idle case, we will allow all the CPUs
11246 	 * to do the newly idle load balance.
11247 	 *
11248 	 * However, we bail out if we already have tasks or a wakeup pending,
11249 	 * to optimize wakeup latency.
11250 	 */
11251 	if (env->idle == CPU_NEWLY_IDLE) {
11252 		if (env->dst_rq->nr_running > 0 || env->dst_rq->ttwu_pending)
11253 			return 0;
11254 		return 1;
11255 	}
11256 
11257 	cpumask_copy(swb_cpus, group_balance_mask(sg));
11258 	/* Try to find first idle CPU */
11259 	for_each_cpu_and(cpu, swb_cpus, env->cpus) {
11260 		if (!idle_cpu(cpu))
11261 			continue;
11262 
11263 		/*
11264 		 * Don't balance to an idle SMT CPU in a busy core right away
11265 		 * when balancing cores, but remember the first idle SMT CPU
11266 		 * for later consideration.  Find a CPU on an idle core first.
11267 		 */
11268 		if (!(env->sd->flags & SD_SHARE_CPUCAPACITY) && !is_core_idle(cpu)) {
11269 			if (idle_smt == -1)
11270 				idle_smt = cpu;
11271 			/*
11272 			 * If the core is not idle and an idle SMT sibling has
11273 			 * already been found, there is no need to check other
11274 			 * SMT siblings for idleness:
11275 			 */
11276 #ifdef CONFIG_SCHED_SMT
11277 			cpumask_andnot(swb_cpus, swb_cpus, cpu_smt_mask(cpu));
11278 #endif
11279 			continue;
11280 		}
11281 
11282 		/*
11283 		 * Are we the first idle core in a non-SMT domain or higher,
11284 		 * or the first idle CPU in an SMT domain?
11285 		 */
11286 		return cpu == env->dst_cpu;
11287 	}
11288 
11289 	/* Are we the first idle CPU with busy siblings? */
11290 	if (idle_smt != -1)
11291 		return idle_smt == env->dst_cpu;
11292 
11293 	/* Are we the first CPU of this group? */
11294 	return group_balance_cpu(sg) == env->dst_cpu;
11295 }
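/*
 * Example (hypothetical topology, at a domain level above SMT): in a group
 * with SMT cores {0,1} and {2,3}, where CPU0 is busy and CPUs 1, 2 and 3
 * are idle, the loop above skips CPU0, records CPU1 as idle_smt (its core
 * is busy), prunes CPU1's siblings, and designates CPU2 -- the first CPU of
 * a fully idle core -- as the balancer.  CPU1 would only be chosen if no
 * fully idle core existed.
 */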
11296 
11297 /*
11298  * Check this_cpu to ensure it is balanced within domain. Attempt to move
11299  * tasks if there is an imbalance.
11300  */
11301 static int load_balance(int this_cpu, struct rq *this_rq,
11302 			struct sched_domain *sd, enum cpu_idle_type idle,
11303 			int *continue_balancing)
11304 {
11305 	int ld_moved, cur_ld_moved, active_balance = 0;
11306 	struct sched_domain *sd_parent = sd->parent;
11307 	struct sched_group *group;
11308 	struct rq *busiest;
11309 	struct rq_flags rf;
11310 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
11311 	struct lb_env env = {
11312 		.sd		= sd,
11313 		.dst_cpu	= this_cpu,
11314 		.dst_rq		= this_rq,
11315 		.dst_grpmask    = group_balance_mask(sd->groups),
11316 		.idle		= idle,
11317 		.loop_break	= SCHED_NR_MIGRATE_BREAK,
11318 		.cpus		= cpus,
11319 		.fbq_type	= all,
11320 		.tasks		= LIST_HEAD_INIT(env.tasks),
11321 	};
11322 
11323 	cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
11324 
11325 	schedstat_inc(sd->lb_count[idle]);
11326 
11327 redo:
11328 	if (!should_we_balance(&env)) {
11329 		*continue_balancing = 0;
11330 		goto out_balanced;
11331 	}
11332 
11333 	group = find_busiest_group(&env);
11334 	if (!group) {
11335 		schedstat_inc(sd->lb_nobusyg[idle]);
11336 		goto out_balanced;
11337 	}
11338 
11339 	busiest = find_busiest_queue(&env, group);
11340 	if (!busiest) {
11341 		schedstat_inc(sd->lb_nobusyq[idle]);
11342 		goto out_balanced;
11343 	}
11344 
11345 	WARN_ON_ONCE(busiest == env.dst_rq);
11346 
11347 	schedstat_add(sd->lb_imbalance[idle], env.imbalance);
11348 
11349 	env.src_cpu = busiest->cpu;
11350 	env.src_rq = busiest;
11351 
11352 	ld_moved = 0;
11353 	/* Clear this flag as soon as we find a pullable task */
11354 	env.flags |= LBF_ALL_PINNED;
11355 	if (busiest->nr_running > 1) {
11356 		/*
11357 		 * Attempt to move tasks. If find_busiest_group has found
11358 		 * an imbalance but busiest->nr_running <= 1, the group is
11359 		 * still unbalanced. ld_moved simply stays zero, so it is
11360 		 * correctly treated as an imbalance.
11361 		 */
11362 		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
11363 
11364 more_balance:
11365 		rq_lock_irqsave(busiest, &rf);
11366 		env.src_rq_rf = &rf;
11367 		update_rq_clock(busiest);
11368 
11369 		/*
11370 		 * cur_ld_moved - load moved in current iteration
11371 		 * ld_moved     - cumulative load moved across iterations
11372 		 */
11373 		cur_ld_moved = detach_tasks(&env);
11374 
11375 		/*
11376 		 * We've detached some tasks from busiest_rq. Every
11377 		 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely
11378 		 * unlock busiest->lock, and we are able to be sure
11379 		 * that nobody can manipulate the tasks in parallel.
11380 		 * See task_rq_lock() family for the details.
11381 		 */
11382 
11383 		rq_unlock(busiest, &rf);
11384 
11385 		if (cur_ld_moved) {
11386 			attach_tasks(&env);
11387 			ld_moved += cur_ld_moved;
11388 		}
11389 
11390 		local_irq_restore(rf.flags);
11391 
11392 		if (env.flags & LBF_NEED_BREAK) {
11393 			env.flags &= ~LBF_NEED_BREAK;
11394 			goto more_balance;
11395 		}
11396 
11397 		/*
11398 		 * Revisit (affine) tasks on src_cpu that couldn't be moved to
11399 		 * us and move them to an alternate dst_cpu in our sched_group
11400 		 * where they can run. The upper limit on how many times we
11401 		 * iterate on same src_cpu is dependent on number of CPUs in our
11402 		 * sched_group.
11403 		 *
11404 		 * This changes load balance semantics a bit on who can move
11405 		 * load to a given_cpu. In addition to the given_cpu itself
11406 		 * (or an ilb_cpu acting on its behalf where given_cpu is
11407 		 * nohz-idle), we now have balance_cpu in a position to move
11408 		 * load to given_cpu. In rare situations, this may cause
11409 		 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
11410 		 * _independently_ and at _same_ time to move some load to
11411 		 * given_cpu) causing excess load to be moved to given_cpu.
11412 		 * This however should not happen often in practice and
11413 		 * moreover subsequent load balance cycles should correct the
11414 		 * excess load moved.
11415 		 */
11416 		if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
11417 
11418 			/* Prevent re-selecting dst_cpu via env's CPUs */
11419 			__cpumask_clear_cpu(env.dst_cpu, env.cpus);
11420 
11421 			env.dst_rq	 = cpu_rq(env.new_dst_cpu);
11422 			env.dst_cpu	 = env.new_dst_cpu;
11423 			env.flags	&= ~LBF_DST_PINNED;
11424 			env.loop	 = 0;
11425 			env.loop_break	 = SCHED_NR_MIGRATE_BREAK;
11426 
11427 			/*
11428 			 * Go back to "more_balance" rather than "redo" since we
11429 			 * need to continue with same src_cpu.
11430 			 */
11431 			goto more_balance;
11432 		}
11433 
11434 		/*
11435 		 * We failed to reach balance because of affinity.
11436 		 */
11437 		if (sd_parent) {
11438 			int *group_imbalance = &sd_parent->groups->sgc->imbalance;
11439 
11440 			if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
11441 				*group_imbalance = 1;
11442 		}
11443 
11444 		/* All tasks on this runqueue were pinned by CPU affinity */
11445 		if (unlikely(env.flags & LBF_ALL_PINNED)) {
11446 			__cpumask_clear_cpu(cpu_of(busiest), cpus);
11447 			/*
11448 			 * Attempting to continue load balancing at the current
11449 			 * sched_domain level only makes sense if there are
11450 			 * active CPUs remaining as possible busiest CPUs to
11451 			 * pull load from which are not contained within the
11452 			 * destination group that is receiving any migrated
11453 			 * load.
11454 			 */
11455 			if (!cpumask_subset(cpus, env.dst_grpmask)) {
11456 				env.loop = 0;
11457 				env.loop_break = SCHED_NR_MIGRATE_BREAK;
11458 				goto redo;
11459 			}
11460 			goto out_all_pinned;
11461 		}
11462 	}
11463 
11464 	if (!ld_moved) {
11465 		schedstat_inc(sd->lb_failed[idle]);
11466 		/*
11467 		 * Increment the failure counter only on periodic balance.
11468 		 * We do not want newidle balance, which can be very
11469 		 * frequent, pollute the failure counter causing
11470 		 * excessive cache_hot migrations and active balances.
11471 		 */
11472 		if (idle != CPU_NEWLY_IDLE)
11473 			sd->nr_balance_failed++;
11474 
11475 		if (need_active_balance(&env)) {
11476 			unsigned long flags;
11477 
11478 			raw_spin_rq_lock_irqsave(busiest, flags);
11479 
11480 			/*
11481 			 * Don't kick the active_load_balance_cpu_stop,
11482 			 * if the curr task on busiest CPU can't be
11483 			 * moved to this_cpu:
11484 			 */
11485 			if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
11486 				raw_spin_rq_unlock_irqrestore(busiest, flags);
11487 				goto out_one_pinned;
11488 			}
11489 
11490 			/* Record that we found at least one task that could run on this_cpu */
11491 			env.flags &= ~LBF_ALL_PINNED;
11492 
11493 			/*
11494 			 * ->active_balance synchronizes accesses to
11495 			 * ->active_balance_work.  Once set, it's cleared
11496 			 * only after active load balance is finished.
11497 			 */
11498 			if (!busiest->active_balance) {
11499 				busiest->active_balance = 1;
11500 				busiest->push_cpu = this_cpu;
11501 				active_balance = 1;
11502 			}
11503 
11504 			preempt_disable();
11505 			raw_spin_rq_unlock_irqrestore(busiest, flags);
11506 			if (active_balance) {
11507 				stop_one_cpu_nowait(cpu_of(busiest),
11508 					active_load_balance_cpu_stop, busiest,
11509 					&busiest->active_balance_work);
11510 			}
11511 			preempt_enable();
11512 		}
11513 	} else {
11514 		sd->nr_balance_failed = 0;
11515 	}
11516 
11517 	if (likely(!active_balance) || need_active_balance(&env)) {
11518 		/* We were unbalanced, so reset the balancing interval */
11519 		sd->balance_interval = sd->min_interval;
11520 	}
11521 
11522 	goto out;
11523 
11524 out_balanced:
11525 	/*
11526 	 * We reach balance although we may have faced some affinity
11527 	 * constraints. Clear the imbalance flag only if other tasks got
11528 	 * a chance to move and fix the imbalance.
11529 	 */
11530 	if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
11531 		int *group_imbalance = &sd_parent->groups->sgc->imbalance;
11532 
11533 		if (*group_imbalance)
11534 			*group_imbalance = 0;
11535 	}
11536 
11537 out_all_pinned:
11538 	/*
11539 	 * We reach balance because all tasks are pinned at this level so
11540 	 * we can't migrate them. Leave the imbalance flag set so the parent level
11541 	 * can try to migrate them.
11542 	 */
11543 	schedstat_inc(sd->lb_balanced[idle]);
11544 
11545 	sd->nr_balance_failed = 0;
11546 
11547 out_one_pinned:
11548 	ld_moved = 0;
11549 
11550 	/*
11551 	 * newidle_balance() disregards balance intervals, so we could
11552 	 * repeatedly reach this code, which would lead to balance_interval
11553 	 * skyrocketing in a short amount of time. Skip the balance_interval
11554 	 * increase logic to avoid that.
11555 	 */
11556 	if (env.idle == CPU_NEWLY_IDLE)
11557 		goto out;
11558 
11559 	/* tune up the balancing interval */
11560 	if ((env.flags & LBF_ALL_PINNED &&
11561 	     sd->balance_interval < MAX_PINNED_INTERVAL) ||
11562 	    sd->balance_interval < sd->max_interval)
11563 		sd->balance_interval *= 2;
11564 out:
11565 	return ld_moved;
11566 }
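/*
 * A quick sketch of the exit paths above (summarizing the code, not
 * authoritative documentation):
 *
 *   out_balanced:   balanced despite possible affinity constraints;
 *                   clear the parent group's imbalance flag unless all
 *                   tasks were pinned (LBF_ALL_PINNED).
 *   out_all_pinned: every task at this level was pinned; leave the
 *                   imbalance flag set so the parent domain can retry.
 *   out_one_pinned: nothing was moved; newidle balance skips the
 *                   interval doubling, periodic balance may double
 *                   sd->balance_interval up to the pinned/max limits.
 */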
11567 
11568 static inline unsigned long
11569 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
11570 {
11571 	unsigned long interval = sd->balance_interval;
11572 
11573 	if (cpu_busy)
11574 		interval *= sd->busy_factor;
11575 
11576 	/* scale ms to jiffies */
11577 	interval = msecs_to_jiffies(interval);
11578 
11579 	/*
11580 	 * Reduce likelihood of busy balancing at higher domains racing with
11581 	 * balancing at lower domains by preventing their balancing periods
11582 	 * from being multiples of each other.
11583 	 */
11584 	if (cpu_busy)
11585 		interval -= 1;
11586 
11587 	interval = clamp(interval, 1UL, max_load_balance_interval);
11588 
11589 	return interval;
11590 }
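/*
 * A rough worked example (assuming HZ=1000 and a busy_factor of 16;
 * busy_factor is a per-domain tunable not shown in this excerpt):
 * balance_interval = 64ms gives a busy interval of 64 * 16 = 1024ms,
 * i.e. 1024 jiffies, and the "-1" above turns that into 1023 jiffies,
 * so it no longer lines up as an exact multiple of a child domain's
 * (say) 512-jiffy busy period.
 */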
11591 
11592 static inline void
11593 update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
11594 {
11595 	unsigned long interval, next;
11596 
11597 	/* used by idle balance, so cpu_busy = 0 */
11598 	interval = get_sd_balance_interval(sd, 0);
11599 	next = sd->last_balance + interval;
11600 
11601 	if (time_after(*next_balance, next))
11602 		*next_balance = next;
11603 }
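/*
 * Note that *next_balance is only ever pulled earlier here: with, say,
 * sd->last_balance == 1000 and an idle interval of 8 jiffies, next is
 * 1008 and *next_balance is rewritten only if it currently lies after
 * that point.
 */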
11604 
11605 /*
11606  * active_load_balance_cpu_stop is run by the CPU stopper. It pushes
11607  * running tasks off the busiest CPU onto idle CPUs. It requires at
11608  * least 1 task to be running on each physical CPU where possible, and
11609  * avoids physical / logical imbalances.
11610  */
11611 static int active_load_balance_cpu_stop(void *data)
11612 {
11613 	struct rq *busiest_rq = data;
11614 	int busiest_cpu = cpu_of(busiest_rq);
11615 	int target_cpu = busiest_rq->push_cpu;
11616 	struct rq *target_rq = cpu_rq(target_cpu);
11617 	struct sched_domain *sd;
11618 	struct task_struct *p = NULL;
11619 	struct rq_flags rf;
11620 
11621 	rq_lock_irq(busiest_rq, &rf);
11622 	/*
11623 	 * Between queueing the stop-work and running it is a hole in which
11624 	 * CPUs can become inactive. We should not move tasks from or to
11625 	 * inactive CPUs.
11626 	 */
11627 	if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
11628 		goto out_unlock;
11629 
11630 	/* Make sure the requested CPU hasn't gone down in the meantime: */
11631 	if (unlikely(busiest_cpu != smp_processor_id() ||
11632 		     !busiest_rq->active_balance))
11633 		goto out_unlock;
11634 
11635 	/* Is there any task to move? */
11636 	if (busiest_rq->nr_running <= 1)
11637 		goto out_unlock;
11638 
11639 	/*
11640 	 * This condition is "impossible", if it occurs
11641 	 * we need to fix it. Originally reported by
11642 	 * Bjorn Helgaas on a 128-CPU setup.
11643 	 */
11644 	WARN_ON_ONCE(busiest_rq == target_rq);
11645 
11646 	/* Search for an sd spanning us and the target CPU. */
11647 	rcu_read_lock();
11648 	for_each_domain(target_cpu, sd) {
11649 		if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
11650 			break;
11651 	}
11652 
11653 	if (likely(sd)) {
11654 		struct lb_env env = {
11655 			.sd		= sd,
11656 			.dst_cpu	= target_cpu,
11657 			.dst_rq		= target_rq,
11658 			.src_cpu	= busiest_rq->cpu,
11659 			.src_rq		= busiest_rq,
11660 			.idle		= CPU_IDLE,
11661 			.flags		= LBF_ACTIVE_LB,
11662 			.src_rq_rf	= &rf,
11663 		};
11664 
11665 		schedstat_inc(sd->alb_count);
11666 		update_rq_clock(busiest_rq);
11667 
11668 		p = detach_one_task(&env);
11669 		if (p) {
11670 			schedstat_inc(sd->alb_pushed);
11671 			/* Active balancing done, reset the failure counter. */
11672 			sd->nr_balance_failed = 0;
11673 		} else {
11674 			schedstat_inc(sd->alb_failed);
11675 		}
11676 	}
11677 	rcu_read_unlock();
11678 out_unlock:
11679 	busiest_rq->active_balance = 0;
11680 	rq_unlock(busiest_rq, &rf);
11681 
11682 	if (p)
11683 		attach_one_task(target_rq, p);
11684 
11685 	local_irq_enable();
11686 
11687 	return 0;
11688 }
11689 
11690 static DEFINE_SPINLOCK(balancing);
11691 
11692 /*
11693  * Scale the max load_balance interval with the number of CPUs in the system.
11694  * This trades load-balance latency on larger machines for less cross talk.
11695  */
11696 void update_max_interval(void)
11697 {
11698 	max_load_balance_interval = HZ*num_online_cpus()/10;
11699 }
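/*
 * For example, with HZ=250 and 8 CPUs online this yields
 * 250 * 8 / 10 = 200 jiffies, i.e. a max_load_balance_interval of
 * roughly 800ms.
 */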
11700 
11701 static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
11702 {
11703 	if (cost > sd->max_newidle_lb_cost) {
11704 		/*
11705 		 * Track max cost of a domain to make sure not to delay the
11706 		 * next wakeup on the CPU.
11707 		 */
11708 		sd->max_newidle_lb_cost = cost;
11709 		sd->last_decay_max_lb_cost = jiffies;
11710 	} else if (time_after(jiffies, sd->last_decay_max_lb_cost + HZ)) {
11711 		/*
11712 		 * Decay the newidle max times by ~1% per second to ensure that
11713 		 * it is not outdated and the current max cost is actually
11714 		 * shorter.
11715 		 */
11716 		sd->max_newidle_lb_cost = (sd->max_newidle_lb_cost * 253) / 256;
11717 		sd->last_decay_max_lb_cost = jiffies;
11718 
11719 		return true;
11720 	}
11721 
11722 	return false;
11723 }
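/*
 * The 253/256 factor above is ~0.988, which is where the "~1% per
 * second" in the comment comes from: e.g. a recorded max cost of
 * 1000000ns decays to 1000000 * 253 / 256 = 988281ns after one second
 * without a new maximum.
 */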
11724 
11725 /*
11726  * It checks each scheduling domain to see if it is due to be balanced,
11727  * and initiates a balancing operation if so.
11728  *
11729  * Balancing parameters are set up in init_sched_domains.
11730  */
11731 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
11732 {
11733 	int continue_balancing = 1;
11734 	int cpu = rq->cpu;
11735 	int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
11736 	unsigned long interval;
11737 	struct sched_domain *sd;
11738 	/* Earliest time when we have to do rebalance again */
11739 	unsigned long next_balance = jiffies + 60*HZ;
11740 	int update_next_balance = 0;
11741 	int need_serialize, need_decay = 0;
11742 	u64 max_cost = 0;
11743 
11744 	trace_android_rvh_sched_rebalance_domains(rq, &continue_balancing);
11745 	if (!continue_balancing)
11746 		return;
11747 
11748 	rcu_read_lock();
11749 	for_each_domain(cpu, sd) {
11750 		/*
11751 		 * Decay the newidle max times here because this is a regular
11752 		 * visit to all the domains.
11753 		 */
11754 		need_decay = update_newidle_cost(sd, 0);
11755 		max_cost += sd->max_newidle_lb_cost;
11756 
11757 		/*
11758 		 * Stop the load balance at this level. There is another
11759 		 * CPU in our sched group which is doing load balancing more
11760 		 * actively.
11761 		 */
11762 		if (!continue_balancing) {
11763 			if (need_decay)
11764 				continue;
11765 			break;
11766 		}
11767 
11768 		interval = get_sd_balance_interval(sd, busy);
11769 
11770 		need_serialize = sd->flags & SD_SERIALIZE;
11771 		if (need_serialize) {
11772 			if (!spin_trylock(&balancing))
11773 				goto out;
11774 		}
11775 
11776 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
11777 			if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
11778 				/*
11779 				 * The LBF_DST_PINNED logic could have changed
11780 				 * env->dst_cpu, so we can't know our idle
11781 				 * state even if we migrated tasks. Update it.
11782 				 */
11783 				idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
11784 				busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
11785 			}
11786 			sd->last_balance = jiffies;
11787 			interval = get_sd_balance_interval(sd, busy);
11788 		}
11789 		if (need_serialize)
11790 			spin_unlock(&balancing);
11791 out:
11792 		if (time_after(next_balance, sd->last_balance + interval)) {
11793 			next_balance = sd->last_balance + interval;
11794 			update_next_balance = 1;
11795 		}
11796 	}
11797 	if (need_decay) {
11798 		/*
11799 		 * Ensure the rq-wide value also decays but keep it at a
11800 		 * reasonable floor to avoid funnies with rq->avg_idle.
11801 		 */
11802 		rq->max_idle_balance_cost =
11803 			max((u64)sysctl_sched_migration_cost, max_cost);
11804 	}
11805 	rcu_read_unlock();
11806 
11807 	/*
11808 	 * next_balance will be updated only when there is a need.
11809 	 * When the cpu is attached to a null domain, for example, it will not be
11810 	 * updated.
11811 	 */
11812 	if (likely(update_next_balance))
11813 		rq->next_balance = next_balance;
11814 
11815 }
11816 
11817 static inline int on_null_domain(struct rq *rq)
11818 {
11819 	return unlikely(!rcu_dereference_sched(rq->sd));
11820 }
11821 
11822 #ifdef CONFIG_NO_HZ_COMMON
11823 /*
11824  * idle load balancing details
11825  * - When one of the busy CPUs notices that idle rebalancing may be
11826  *   needed, it will kick the idle load balancer, which then does idle
11827  *   load balancing for all the idle CPUs.
11828  * - HK_TYPE_MISC CPUs are used for this task, because HK_TYPE_SCHED is not
11829  *   set anywhere yet.
11830  */
11831 
11832 static inline int find_new_ilb(void)
11833 {
11834 	int ilb = -1;
11835 	const struct cpumask *hk_mask;
11836 
11837 	trace_android_rvh_find_new_ilb(nohz.idle_cpus_mask, &ilb);
11838 	if (ilb >= 0)
11839 		return ilb;
11840 
11841 	hk_mask = housekeeping_cpumask(HK_TYPE_MISC);
11842 
11843 	for_each_cpu_and(ilb, nohz.idle_cpus_mask, hk_mask) {
11844 
11845 		if (ilb == smp_processor_id())
11846 			continue;
11847 
11848 		if (idle_cpu(ilb))
11849 			return ilb;
11850 	}
11851 
11852 	return nr_cpu_ids;
11853 }
11854 
11855 /*
11856  * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
11857  * idle CPU in the HK_TYPE_MISC housekeeping set (if there is one).
11858  */
11859 static void kick_ilb(unsigned int flags)
11860 {
11861 	int ilb_cpu;
11862 
11863 	/*
11864 	 * Increase nohz.next_balance only if a full ilb is triggered, but
11865 	 * not if we only update stats.
11866 	 */
11867 	if (flags & NOHZ_BALANCE_KICK)
11868 		nohz.next_balance = jiffies+1;
11869 
11870 	ilb_cpu = find_new_ilb();
11871 
11872 	if (ilb_cpu >= nr_cpu_ids)
11873 		return;
11874 
11875 	/*
11876 	 * Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets
11877 	 * the first flag owns it; cleared by nohz_csd_func().
11878 	 */
11879 	flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
11880 	if (flags & NOHZ_KICK_MASK)
11881 		return;
11882 
11883 	/*
11884 	 * This way we generate an IPI on the target CPU, which
11885 	 * is idle, and the softirq performing the nohz idle load balance
11886 	 * will be run before returning from the IPI.
11887 	 */
11888 	smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd);
11889 }
11890 
11891 /*
11892  * Current decision point for kicking the idle load balancer in the presence
11893  * of idle CPUs in the system.
11894  */
11895 static void nohz_balancer_kick(struct rq *rq)
11896 {
11897 	unsigned long now = jiffies;
11898 	struct sched_domain_shared *sds;
11899 	struct sched_domain *sd;
11900 	int nr_busy, i, cpu = rq->cpu;
11901 	unsigned int flags = 0;
11902 	int done = 0;
11903 
11904 	if (unlikely(rq->idle_balance))
11905 		return;
11906 
11907 	/*
11908 	 * We may recently have been in ticked or tickless idle mode. At the first
11909 	 * busy tick after returning from idle, we will update the busy stats.
11910 	 */
11911 	nohz_balance_exit_idle(rq);
11912 
11913 	/*
11914 	 * None are in tickless mode and hence no need for NOHZ idle load
11915 	 * balancing.
11916 	 */
11917 	if (likely(!atomic_read(&nohz.nr_cpus)))
11918 		return;
11919 
11920 	if (READ_ONCE(nohz.has_blocked) &&
11921 	    time_after(now, READ_ONCE(nohz.next_blocked)))
11922 		flags = NOHZ_STATS_KICK;
11923 
11924 	if (time_before(now, nohz.next_balance))
11925 		goto out;
11926 
11927 	trace_android_rvh_sched_nohz_balancer_kick(rq, &flags, &done);
11928 	if (done)
11929 		goto out;
11930 
11931 	if (rq->nr_running >= 2) {
11932 		flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
11933 		goto out;
11934 	}
11935 
11936 	rcu_read_lock();
11937 
11938 	sd = rcu_dereference(rq->sd);
11939 	if (sd) {
11940 		/*
11941 		 * If there's a CFS task and the current CPU has reduced
11942 		 * capacity; kick the ILB to see if there's a better CPU to run
11943 		 * on.
11944 		 */
11945 		if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
11946 			flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
11947 			goto unlock;
11948 		}
11949 	}
11950 
11951 	sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
11952 	if (sd) {
11953 		/*
11954 		 * When ASYM_PACKING; see if there's a more preferred CPU
11955 		 * currently idle; in which case, kick the ILB to move tasks
11956 		 * around.
11957 		 *
11958 		 * When balancing between cores, all the SMT siblings of the
11959 		 * preferred CPU must be idle.
11960 		 */
11961 		for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
11962 			if (sched_use_asym_prio(sd, i) &&
11963 			    sched_asym_prefer(i, cpu)) {
11964 				flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
11965 				goto unlock;
11966 			}
11967 		}
11968 	}
11969 
11970 	sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
11971 	if (sd) {
11972 		/*
11973 		 * When ASYM_CPUCAPACITY; see if there's a higher capacity CPU
11974 		 * to run the misfit task on.
11975 		 */
11976 		if (check_misfit_status(rq, sd)) {
11977 			flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
11978 			goto unlock;
11979 		}
11980 
11981 		/*
11982 		 * For asymmetric systems, we do not want to nicely balance
11983 		 * cache use, instead we want to embrace asymmetry and only
11984 		 * ensure tasks have enough CPU capacity.
11985 		 *
11986 		 * Skip the LLC logic because it's not relevant in that case.
11987 		 */
11988 		goto unlock;
11989 	}
11990 
11991 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
11992 	if (sds) {
11993 		/*
11994 		 * If there is an imbalance between LLC domains (IOW we could
11995 		 * increase the overall cache use), we need some less-loaded LLC
11996 		 * domain to pull some load. Likewise, we may need to spread
11997 		 * load within the current LLC domain (e.g. packed SMT cores but
11998 		 * other CPUs are idle). We can't really know from here how busy
11999 		 * the others are - so just get a nohz balance going if it looks
12000 		 * like this LLC domain has tasks we could move.
12001 		 */
12002 		nr_busy = atomic_read(&sds->nr_busy_cpus);
12003 		if (nr_busy > 1) {
12004 			flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
12005 			goto unlock;
12006 		}
12007 	}
12008 unlock:
12009 	rcu_read_unlock();
12010 out:
12011 	if (READ_ONCE(nohz.needs_update))
12012 		flags |= NOHZ_NEXT_KICK;
12013 
12014 	if (flags)
12015 		kick_ilb(flags);
12016 }
12017 
12018 static void set_cpu_sd_state_busy(int cpu)
12019 {
12020 	struct sched_domain *sd;
12021 
12022 	rcu_read_lock();
12023 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
12024 
12025 	if (!sd || !sd->nohz_idle)
12026 		goto unlock;
12027 	sd->nohz_idle = 0;
12028 
12029 	atomic_inc(&sd->shared->nr_busy_cpus);
12030 unlock:
12031 	rcu_read_unlock();
12032 }
12033 
12034 void nohz_balance_exit_idle(struct rq *rq)
12035 {
12036 	SCHED_WARN_ON(rq != this_rq());
12037 
12038 	if (likely(!rq->nohz_tick_stopped))
12039 		return;
12040 
12041 	rq->nohz_tick_stopped = 0;
12042 	cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
12043 	atomic_dec(&nohz.nr_cpus);
12044 
12045 	set_cpu_sd_state_busy(rq->cpu);
12046 }
12047 
12048 static void set_cpu_sd_state_idle(int cpu)
12049 {
12050 	struct sched_domain *sd;
12051 
12052 	rcu_read_lock();
12053 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
12054 
12055 	if (!sd || sd->nohz_idle)
12056 		goto unlock;
12057 	sd->nohz_idle = 1;
12058 
12059 	atomic_dec(&sd->shared->nr_busy_cpus);
12060 unlock:
12061 	rcu_read_unlock();
12062 }
12063 
12064 /*
12065  * This routine will record that the CPU is going idle with tick stopped.
12066  * This info will be used in performing idle load balancing in the future.
12067  */
12068 void nohz_balance_enter_idle(int cpu)
12069 {
12070 	struct rq *rq = cpu_rq(cpu);
12071 
12072 	SCHED_WARN_ON(cpu != smp_processor_id());
12073 
12074 	/* If this CPU is going down, then nothing needs to be done: */
12075 	if (!cpu_active(cpu))
12076 		return;
12077 
12078 	/* Spare idle load balancing on CPUs that don't want to be disturbed: */
12079 	if (!housekeeping_cpu(cpu, HK_TYPE_SCHED))
12080 		return;
12081 
12082 	/*
12083 	 * Can be set safely without rq->lock held.
12084 	 * If a clear happens, it will have evaluated the last additions, because
12085 	 * rq->lock is held during both the check and the clear.
12086 	 */
12087 	rq->has_blocked_load = 1;
12088 
12089 	/*
12090 	 * The tick is still stopped but load could have been added in the
12091 	 * meantime. We set the nohz.has_blocked flag to trigger a check of the
12092 	 * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear
12093 	 * of nohz.has_blocked can only happen after checking the new load.
12094 	 */
12095 	if (rq->nohz_tick_stopped)
12096 		goto out;
12097 
12098 	/* If we're a completely isolated CPU, we don't play: */
12099 	if (on_null_domain(rq))
12100 		return;
12101 
12102 	rq->nohz_tick_stopped = 1;
12103 
12104 	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
12105 	atomic_inc(&nohz.nr_cpus);
12106 
12107 	/*
12108 	 * Ensures that if nohz_idle_balance() fails to observe our
12109 	 * @idle_cpus_mask store, it must observe the @has_blocked
12110 	 * and @needs_update stores.
12111 	 */
12112 	smp_mb__after_atomic();
12113 
12114 	set_cpu_sd_state_idle(cpu);
12115 
12116 	WRITE_ONCE(nohz.needs_update, 1);
12117 out:
12118 	/*
12119 	 * Each time a CPU enters idle, we assume that it has blocked load and
12120 	 * enable the periodic update of the load of idle CPUs.
12121 	 */
12122 	WRITE_ONCE(nohz.has_blocked, 1);
12123 }
12124 
12125 static bool update_nohz_stats(struct rq *rq)
12126 {
12127 	unsigned int cpu = rq->cpu;
12128 
12129 	if (!rq->has_blocked_load)
12130 		return false;
12131 
12132 	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
12133 		return false;
12134 
12135 	if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
12136 		return true;
12137 
12138 	update_blocked_averages(cpu);
12139 
12140 	return rq->has_blocked_load;
12141 }
12142 
12143 /*
12144  * Internal function that runs load balance for all idle cpus. The load balance
12145  * can be a simple update of blocked load or a complete load balance with
12146  * task movement, depending on flags.
12147  */
12148 static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
12149 {
12150 	/* Earliest time when we have to do rebalance again */
12151 	unsigned long now = jiffies;
12152 	unsigned long next_balance = now + 60*HZ;
12153 	bool has_blocked_load = false;
12154 	int update_next_balance = 0;
12155 	int this_cpu = this_rq->cpu;
12156 	int balance_cpu;
12157 	struct rq *rq;
12158 
12159 	SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK);
12160 
12161 	/*
12162 	 * We assume there will be no idle load after this update and clear
12163 	 * the has_blocked flag. If a cpu enters idle in the meantime, it will
12164 	 * set the has_blocked flag and trigger another update of idle load.
12165 	 * Because a cpu that becomes idle is added to idle_cpus_mask before
12166 	 * setting the flag, we are sure not to clear the state without
12167 	 * checking the load of that idle cpu.
12168 	 *
12169 	 * Same applies to idle_cpus_mask vs needs_update.
12170 	 */
12171 	if (flags & NOHZ_STATS_KICK)
12172 		WRITE_ONCE(nohz.has_blocked, 0);
12173 	if (flags & NOHZ_NEXT_KICK)
12174 		WRITE_ONCE(nohz.needs_update, 0);
12175 
12176 	/*
12177 	 * Ensures that if we miss the CPU, we must see the has_blocked
12178 	 * store from nohz_balance_enter_idle().
12179 	 */
12180 	smp_mb();
12181 
12182 	/*
12183 	 * Start with the next CPU after this_cpu so we will end with this_cpu and give
12184 	 * other idle CPUs a chance to pull load.
12185 	 */
12186 	for_each_cpu_wrap(balance_cpu,  nohz.idle_cpus_mask, this_cpu+1) {
12187 		if (!idle_cpu(balance_cpu))
12188 			continue;
12189 
12190 		/*
12191 		 * If this CPU gets work to do, stop the load balancing
12192 		 * work being done for other CPUs. Next load
12193 		 * balancing owner will pick it up.
12194 		 */
12195 		if (need_resched()) {
12196 			if (flags & NOHZ_STATS_KICK)
12197 				has_blocked_load = true;
12198 			if (flags & NOHZ_NEXT_KICK)
12199 				WRITE_ONCE(nohz.needs_update, 1);
12200 			goto abort;
12201 		}
12202 
12203 		rq = cpu_rq(balance_cpu);
12204 
12205 		if (flags & NOHZ_STATS_KICK)
12206 			has_blocked_load |= update_nohz_stats(rq);
12207 
12208 		/*
12209 		 * If time for next balance is due,
12210 		 * do the balance.
12211 		 */
12212 		if (time_after_eq(jiffies, rq->next_balance)) {
12213 			struct rq_flags rf;
12214 
12215 			rq_lock_irqsave(rq, &rf);
12216 			update_rq_clock(rq);
12217 			rq_unlock_irqrestore(rq, &rf);
12218 
12219 			if (flags & NOHZ_BALANCE_KICK)
12220 				rebalance_domains(rq, CPU_IDLE);
12221 		}
12222 
12223 		if (time_after(next_balance, rq->next_balance)) {
12224 			next_balance = rq->next_balance;
12225 			update_next_balance = 1;
12226 		}
12227 	}
12228 
12229 	/*
12230 	 * next_balance will be updated only when there is a need.
12231 	 * When the CPU is attached to a null domain, for example, it will not be
12232 	 * updated.
12233 	 */
12234 	if (likely(update_next_balance))
12235 		nohz.next_balance = next_balance;
12236 
12237 	if (flags & NOHZ_STATS_KICK)
12238 		WRITE_ONCE(nohz.next_blocked,
12239 			   now + msecs_to_jiffies(LOAD_AVG_PERIOD));
12240 
12241 abort:
12242 	/* There is still blocked load, enable periodic update */
12243 	if (has_blocked_load)
12244 		WRITE_ONCE(nohz.has_blocked, 1);
12245 }
12246 
12247 /*
12248  * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
12249  * rebalancing for all the cpus for whom scheduler ticks are stopped.
12250  */
12251 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
12252 {
12253 	unsigned int flags = this_rq->nohz_idle_balance;
12254 
12255 	if (!flags)
12256 		return false;
12257 
12258 	this_rq->nohz_idle_balance = 0;
12259 
12260 	if (idle != CPU_IDLE)
12261 		return false;
12262 
12263 	_nohz_idle_balance(this_rq, flags);
12264 
12265 	return true;
12266 }
12267 
12268 /*
12269  * Check if we need to run the ILB for updating blocked load before entering
12270  * idle state.
12271  */
12272 void nohz_run_idle_balance(int cpu)
12273 {
12274 	unsigned int flags;
12275 
12276 	flags = atomic_fetch_andnot(NOHZ_NEWILB_KICK, nohz_flags(cpu));
12277 
12278 	/*
12279 	 * Update the blocked load only if no SCHED_SOFTIRQ is about to happen
12280 	 * that has NOHZ_STATS_KICK set and would do the same update.
12281 	 */
12282 	if ((flags == NOHZ_NEWILB_KICK) && !need_resched())
12283 		_nohz_idle_balance(cpu_rq(cpu), NOHZ_STATS_KICK);
12284 }
12285 
12286 static void nohz_newidle_balance(struct rq *this_rq)
12287 {
12288 	int this_cpu = this_rq->cpu;
12289 
12290 	/*
12291 	 * This CPU doesn't want to be disturbed by scheduler
12292 	 * housekeeping
12293 	 */
12294 	if (!housekeeping_cpu(this_cpu, HK_TYPE_SCHED))
12295 		return;
12296 
12297 	/* Will wake up very soon. No time for doing anything else */
12298 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
12299 		return;
12300 
12301 	/* Don't need to update blocked load of idle CPUs */
12302 	if (!READ_ONCE(nohz.has_blocked) ||
12303 	    time_before(jiffies, READ_ONCE(nohz.next_blocked)))
12304 		return;
12305 
12306 	/*
12307 	 * Set the need to trigger ILB in order to update blocked load
12308 	 * before entering idle state.
12309 	 */
12310 	atomic_or(NOHZ_NEWILB_KICK, nohz_flags(this_cpu));
12311 }
12312 
12313 #else /* !CONFIG_NO_HZ_COMMON */
12314 static inline void nohz_balancer_kick(struct rq *rq) { }
12315 
12316 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
12317 {
12318 	return false;
12319 }
12320 
12321 static inline void nohz_newidle_balance(struct rq *this_rq) { }
12322 #endif /* CONFIG_NO_HZ_COMMON */
12323 
12324 /*
12325  * newidle_balance is called by schedule() if this_cpu is about to become
12326  * idle. Attempts to pull tasks from other CPUs.
12327  *
12328  * Returns:
12329  *   < 0 - we released the lock and there are !fair tasks present
12330  *     0 - failed, no new tasks
12331  *   > 0 - success, new (fair) tasks present
12332  */
12333 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
12334 {
12335 	unsigned long next_balance = jiffies + HZ;
12336 	int this_cpu = this_rq->cpu;
12337 	u64 t0, t1, curr_cost = 0;
12338 	struct sched_domain *sd;
12339 	int pulled_task = 0;
12340 	int done = 0;
12341 
12342 	trace_android_rvh_sched_newidle_balance(this_rq, rf, &pulled_task, &done);
12343 	if (done)
12344 		return pulled_task;
12345 
12346 	update_misfit_status(NULL, this_rq);
12347 
12348 	/*
12349 	 * There is a task waiting to run. No need to search for one.
12350 	 * Return 0; the task will be enqueued when switching to idle.
12351 	 */
12352 	if (this_rq->ttwu_pending)
12353 		return 0;
12354 
12355 	/*
12356 	 * We must set idle_stamp _before_ calling idle_balance(), such that we
12357 	 * measure the duration of idle_balance() as idle time.
12358 	 */
12359 	this_rq->idle_stamp = rq_clock(this_rq);
12360 
12361 	/*
12362 	 * Do not pull tasks towards !active CPUs...
12363 	 */
12364 	if (!cpu_active(this_cpu))
12365 		return 0;
12366 
12367 	/*
12368 	 * This is OK, because current is on_cpu, which avoids it being picked
12369 	 * for load-balance and preemption/IRQs are still disabled avoiding
12370 	 * further scheduler activity on it and we're being very careful to
12371 	 * re-start the picking loop.
12372 	 */
12373 	rq_unpin_lock(this_rq, rf);
12374 
12375 	rcu_read_lock();
12376 	sd = rcu_dereference_check_sched_domain(this_rq->sd);
12377 
12378 	if (!READ_ONCE(this_rq->rd->overload) ||
12379 	    (sd && this_rq->avg_idle < sd->max_newidle_lb_cost)) {
12380 
12381 		if (sd)
12382 			update_next_balance(sd, &next_balance);
12383 		rcu_read_unlock();
12384 
12385 		goto out;
12386 	}
12387 	rcu_read_unlock();
12388 
12389 	raw_spin_rq_unlock(this_rq);
12390 
12391 	t0 = sched_clock_cpu(this_cpu);
12392 	update_blocked_averages(this_cpu);
12393 
12394 	rcu_read_lock();
12395 	for_each_domain(this_cpu, sd) {
12396 		int continue_balancing = 1;
12397 		u64 domain_cost;
12398 
12399 		update_next_balance(sd, &next_balance);
12400 
12401 		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
12402 			break;
12403 
12404 		if (sd->flags & SD_BALANCE_NEWIDLE) {
12405 
12406 			pulled_task = load_balance(this_cpu, this_rq,
12407 						   sd, CPU_NEWLY_IDLE,
12408 						   &continue_balancing);
12409 
12410 			t1 = sched_clock_cpu(this_cpu);
12411 			domain_cost = t1 - t0;
12412 			update_newidle_cost(sd, domain_cost);
12413 
12414 			curr_cost += domain_cost;
12415 			t0 = t1;
12416 		}
12417 
12418 		/*
12419 		 * Stop searching for tasks to pull if there are
12420 		 * now runnable tasks on this rq.
12421 		 */
12422 		if (pulled_task || this_rq->nr_running > 0 ||
12423 		    this_rq->ttwu_pending)
12424 			break;
12425 	}
12426 	rcu_read_unlock();
12427 
12428 	raw_spin_rq_lock(this_rq);
12429 
12430 	if (curr_cost > this_rq->max_idle_balance_cost)
12431 		this_rq->max_idle_balance_cost = curr_cost;
12432 
12433 	/*
12434 	 * While browsing the domains, we released the rq lock, a task could
12435 	 * have been enqueued in the meantime. Since we're not going idle,
12436 	 * pretend we pulled a task.
12437 	 */
12438 	if (this_rq->cfs.h_nr_running && !pulled_task)
12439 		pulled_task = 1;
12440 
12441 	/* Is there a task of a high priority class? */
12442 	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
12443 		pulled_task = -1;
12444 
12445 out:
12446 	/* Move the next balance forward */
12447 	if (time_after(this_rq->next_balance, next_balance))
12448 		this_rq->next_balance = next_balance;
12449 
12450 	if (pulled_task)
12451 		this_rq->idle_stamp = 0;
12452 	else
12453 		nohz_newidle_balance(this_rq);
12454 
12455 	rq_repin_lock(this_rq, rf);
12456 
12457 	return pulled_task;
12458 }
12459 
12460 /*
12461  * run_rebalance_domains is triggered when needed from the scheduler tick.
12462  * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
12463  */
12464 static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
12465 {
12466 	struct rq *this_rq = this_rq();
12467 	enum cpu_idle_type idle = this_rq->idle_balance ?
12468 						CPU_IDLE : CPU_NOT_IDLE;
12469 
12470 	/*
12471 	 * If this CPU has a pending nohz_balance_kick, then do the
12472 	 * balancing on behalf of the other idle CPUs whose ticks are
12473 	 * stopped. Do nohz_idle_balance *before* rebalance_domains to
12474 	 * give the idle CPUs a chance to load balance. Else we may
12475 	 * load balance only within the local sched_domain hierarchy
12476 	 * and abort nohz_idle_balance altogether if we pull some load.
12477 	 */
12478 	if (nohz_idle_balance(this_rq, idle))
12479 		return;
12480 
12481 	/* normal load balance */
12482 	update_blocked_averages(this_rq->cpu);
12483 	rebalance_domains(this_rq, idle);
12484 }
12485 
12486 /*
12487  * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
12488  */
12489 void trigger_load_balance(struct rq *rq)
12490 {
12491 	/*
12492 	 * Don't need to rebalance while attached to a NULL domain or
12493 	 * while the runqueue's CPU is not active.
12494 	 */
12495 	if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq))))
12496 		return;
12497 
12498 	if (time_after_eq(jiffies, rq->next_balance))
12499 		raise_softirq(SCHED_SOFTIRQ);
12500 
12501 	nohz_balancer_kick(rq);
12502 }
12503 
12504 static void rq_online_fair(struct rq *rq)
12505 {
12506 	update_sysctl();
12507 
12508 	update_runtime_enabled(rq);
12509 }
12510 
12511 static void rq_offline_fair(struct rq *rq)
12512 {
12513 	update_sysctl();
12514 
12515 	/* Ensure any throttled groups are reachable by pick_next_task */
12516 	unthrottle_offline_cfs_rqs(rq);
12517 }
12518 
12519 #endif /* CONFIG_SMP */
12520 
12521 #ifdef CONFIG_SCHED_CORE
12522 static inline bool
12523 __entity_slice_used(struct sched_entity *se, int min_nr_tasks)
12524 {
12525 	u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime;
12526 	u64 slice = se->slice;
12527 
12528 	return (rtime * min_nr_tasks > slice);
12529 }
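/*
 * With min_nr_tasks == 2 (MIN_NR_TASKS_DURING_FORCEIDLE below), the
 * check "rtime * 2 > slice" is equivalent to "rtime > slice / 2": the
 * entity is deemed to have used up its share of the slice once it has
 * run for more than half of it.
 */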
12530 
12531 #define MIN_NR_TASKS_DURING_FORCEIDLE	2
12532 static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
12533 {
12534 	if (!sched_core_enabled(rq))
12535 		return;
12536 
12537 	/*
12538 	 * If runqueue has only one task which used up its slice and
12539 	 * if the sibling is forced idle, then trigger schedule to
12540 	 * give forced idle task a chance.
12541 	 *
12542 	 * sched_slice() considers only this active rq and it gets the
12543 	 * whole slice. But during force idle, we have siblings acting
12544 	 * like a single runqueue and hence we need to consider runnable
12545 	 * tasks on this CPU and the forced idle CPU. Ideally, we should
12546 	 * go through the forced idle rq, but that would be a perf hit.
12547 	 * We can assume that the forced idle CPU has at least
12548 	 * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check
12549 	 * if we need to give up the CPU.
12550 	 */
12551 	if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
12552 	    __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
12553 		resched_curr(rq);
12554 }
12555 
12556 /*
12557  * se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
12558  */
12559 static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq,
12560 			 bool forceidle)
12561 {
12562 	for_each_sched_entity(se) {
12563 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
12564 
12565 		if (forceidle) {
12566 			if (cfs_rq->forceidle_seq == fi_seq)
12567 				break;
12568 			cfs_rq->forceidle_seq = fi_seq;
12569 		}
12570 
12571 		cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime;
12572 	}
12573 }
12574 
12575 void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
12576 {
12577 	struct sched_entity *se = &p->se;
12578 
12579 	if (p->sched_class != &fair_sched_class)
12580 		return;
12581 
12582 	se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
12583 }
12584 
12585 bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
12586 			bool in_fi)
12587 {
12588 	struct rq *rq = task_rq(a);
12589 	const struct sched_entity *sea = &a->se;
12590 	const struct sched_entity *seb = &b->se;
12591 	struct cfs_rq *cfs_rqa;
12592 	struct cfs_rq *cfs_rqb;
12593 	s64 delta;
12594 
12595 	SCHED_WARN_ON(task_rq(b)->core != rq->core);
12596 
12597 #ifdef CONFIG_FAIR_GROUP_SCHED
12598 	/*
12599 	 * Find an se in the hierarchy for tasks a and b, such that the se's
12600 	 * are immediate siblings.
12601 	 */
12602 	while (sea->cfs_rq->tg != seb->cfs_rq->tg) {
12603 		int sea_depth = sea->depth;
12604 		int seb_depth = seb->depth;
12605 
12606 		if (sea_depth >= seb_depth)
12607 			sea = parent_entity(sea);
12608 		if (sea_depth <= seb_depth)
12609 			seb = parent_entity(seb);
12610 	}
12611 
12612 	se_fi_update(sea, rq->core->core_forceidle_seq, in_fi);
12613 	se_fi_update(seb, rq->core->core_forceidle_seq, in_fi);
12614 
12615 	cfs_rqa = sea->cfs_rq;
12616 	cfs_rqb = seb->cfs_rq;
12617 #else
12618 	cfs_rqa = &task_rq(a)->cfs;
12619 	cfs_rqb = &task_rq(b)->cfs;
12620 #endif
12621 
12622 	/*
12623 	 * Find delta after normalizing se's vruntime with its cfs_rq's
12624 	 * min_vruntime_fi, which would have been updated in prior calls
12625 	 * to se_fi_update().
12626 	 */
12627 	delta = (s64)(sea->vruntime - seb->vruntime) +
12628 		(s64)(cfs_rqb->min_vruntime_fi - cfs_rqa->min_vruntime_fi);
12629 
12630 	return delta > 0;
12631 }
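/*
 * Rearranged, the return condition above reads:
 *
 *   (sea->vruntime - cfs_rqa->min_vruntime_fi) >
 *   (seb->vruntime - cfs_rqb->min_vruntime_fi)
 *
 * i.e. each side's vruntime is first normalized against its own
 * queue's force-idle baseline, and 'a' is the less preferred task when
 * its normalized vruntime is the larger of the two.
 */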
12632 
12633 static int task_is_throttled_fair(struct task_struct *p, int cpu)
12634 {
12635 	struct cfs_rq *cfs_rq;
12636 
12637 #ifdef CONFIG_FAIR_GROUP_SCHED
12638 	cfs_rq = task_group(p)->cfs_rq[cpu];
12639 #else
12640 	cfs_rq = &cpu_rq(cpu)->cfs;
12641 #endif
12642 	return throttled_hierarchy(cfs_rq);
12643 }
12644 #else
12645 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
12646 #endif
12647 
12648 /*
12649  * scheduler tick hitting a task of our scheduling class.
12650  *
12651  * NOTE: This function can be called remotely by the tick offload that
12652  * goes along full dynticks. Therefore no local assumption can be made
12653  * and everything must be accessed through the @rq and @curr passed in
12654  * parameters.
12655  */
12656 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
12657 {
12658 	struct cfs_rq *cfs_rq;
12659 	struct sched_entity *se = &curr->se;
12660 
12661 	for_each_sched_entity(se) {
12662 		cfs_rq = cfs_rq_of(se);
12663 		entity_tick(cfs_rq, se, queued);
12664 	}
12665 
12666 	if (static_branch_unlikely(&sched_numa_balancing))
12667 		task_tick_numa(rq, curr);
12668 
12669 	update_misfit_status(curr, rq);
12670 	check_update_overutilized_status(task_rq(curr));
12671 
12672 	task_tick_core(rq, curr);
12673 }
12674 
12675 /*
12676  * called on fork with the child task as argument from the parent's context
12677  *  - child not yet on the tasklist
12678  *  - preemption disabled
12679  */
12680 static void task_fork_fair(struct task_struct *p)
12681 {
12682 	struct sched_entity *se = &p->se, *curr;
12683 	struct cfs_rq *cfs_rq;
12684 	struct rq *rq = this_rq();
12685 	struct rq_flags rf;
12686 
12687 	rq_lock(rq, &rf);
12688 	update_rq_clock(rq);
12689 
12690 	cfs_rq = task_cfs_rq(current);
12691 	curr = cfs_rq->curr;
12692 	if (curr)
12693 		update_curr(cfs_rq);
12694 	place_entity(cfs_rq, se, ENQUEUE_INITIAL);
12695 	rq_unlock(rq, &rf);
12696 }
12697 
12698 /*
12699  * Priority of the task has changed. Check to see if we preempt
12700  * the current task.
12701  */
12702 static void
12703 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
12704 {
12705 	if (!task_on_rq_queued(p))
12706 		return;
12707 
12708 	if (rq->cfs.nr_running == 1)
12709 		return;
12710 
12711 	/*
12712 	 * Reschedule if we are currently running on this runqueue and
12713 	 * this runqueue and our priority is higher than the current task's.
12714 	 * this runqueue and our priority is higher than the current's
12715 	 */
12716 	if (task_current(rq, p)) {
12717 		if (p->prio > oldprio)
12718 			resched_curr(rq);
12719 	} else
12720 		check_preempt_curr(rq, p, 0);
12721 }
12722 
12723 #ifdef CONFIG_FAIR_GROUP_SCHED
12724 /*
12725  * Propagate the changes of the sched_entity across the tg tree to make them
12726  * visible to the root.
12727  */
12728 static void propagate_entity_cfs_rq(struct sched_entity *se)
12729 {
12730 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
12731 
12732 	if (cfs_rq_throttled(cfs_rq))
12733 		return;
12734 
12735 	if (!throttled_hierarchy(cfs_rq))
12736 		list_add_leaf_cfs_rq(cfs_rq);
12737 
12738 	/* Start to propagate at parent */
12739 	se = se->parent;
12740 
12741 	for_each_sched_entity(se) {
12742 		cfs_rq = cfs_rq_of(se);
12743 
12744 		update_load_avg(cfs_rq, se, UPDATE_TG);
12745 
12746 		if (cfs_rq_throttled(cfs_rq))
12747 			break;
12748 
12749 		if (!throttled_hierarchy(cfs_rq))
12750 			list_add_leaf_cfs_rq(cfs_rq);
12751 	}
12752 }
12753 #else
12754 static void propagate_entity_cfs_rq(struct sched_entity *se) { }
12755 #endif
12756 
12757 static void detach_entity_cfs_rq(struct sched_entity *se)
12758 {
12759 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
12760 
12761 #ifdef CONFIG_SMP
12762 	/*
12763 	 * In case the task sched_avg hasn't been attached:
12764 	 * - A forked task which hasn't been woken up by wake_up_new_task().
12765 	 * - A task which has been woken up by try_to_wake_up() but is
12766 	 *   waiting for actually being woken up by sched_ttwu_pending().
12767 	 */
12768 	if (!se->avg.last_update_time)
12769 		return;
12770 #endif
12771 
12772 	/* Catch up with the cfs_rq and remove our load when we leave */
12773 	update_load_avg(cfs_rq, se, 0);
12774 	detach_entity_load_avg(cfs_rq, se);
12775 	update_tg_load_avg(cfs_rq);
12776 	propagate_entity_cfs_rq(se);
12777 }
12778 
12779 static void attach_entity_cfs_rq(struct sched_entity *se)
12780 {
12781 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
12782 
12783 	/* Synchronize entity with its cfs_rq */
12784 	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
12785 	attach_entity_load_avg(cfs_rq, se);
12786 	update_tg_load_avg(cfs_rq);
12787 	propagate_entity_cfs_rq(se);
12788 }
12789 
12790 static void detach_task_cfs_rq(struct task_struct *p)
12791 {
12792 	struct sched_entity *se = &p->se;
12793 
12794 	detach_entity_cfs_rq(se);
12795 }
12796 
12797 static void attach_task_cfs_rq(struct task_struct *p)
12798 {
12799 	struct sched_entity *se = &p->se;
12800 
12801 	attach_entity_cfs_rq(se);
12802 }
12803 
12804 static void switched_from_fair(struct rq *rq, struct task_struct *p)
12805 {
12806 	detach_task_cfs_rq(p);
12807 }
12808 
12809 static void switched_to_fair(struct rq *rq, struct task_struct *p)
12810 {
12811 	attach_task_cfs_rq(p);
12812 
12813 	if (task_on_rq_queued(p)) {
12814 		/*
12815 		 * We were most likely switched from sched_rt, so
12816 		 * kick off the schedule if running, otherwise just see
12817 		 * if we can still preempt the current task.
12818 		 */
12819 		if (task_current(rq, p))
12820 			resched_curr(rq);
12821 		else
12822 			check_preempt_curr(rq, p, 0);
12823 	}
12824 }
12825 
12826 /* Account for a task changing its policy or group.
12827  *
12828  * This routine is mostly called to set cfs_rq->curr field when a task
12829  * migrates between groups/classes.
12830  */
12831 static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
12832 {
12833 	struct sched_entity *se = &p->se;
12834 
12835 #ifdef CONFIG_SMP
12836 	if (task_on_rq_queued(p)) {
12837 		/*
12838 		 * Move the next running task to the front of the list, so our
12839 		 * Move the next running task to the front of the list, so that
12840 		 * our cfs_tasks list becomes an MRU list.
12841 		list_move(&se->group_node, &rq->cfs_tasks);
12842 	}
12843 #endif
12844 
12845 	for_each_sched_entity(se) {
12846 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
12847 
12848 		set_next_entity(cfs_rq, se);
12849 		/* ensure bandwidth has been allocated on our new cfs_rq */
12850 		account_cfs_rq_runtime(cfs_rq, 0);
12851 	}
12852 }
12853 
12854 void init_cfs_rq(struct cfs_rq *cfs_rq)
12855 {
12856 	cfs_rq->tasks_timeline = RB_ROOT_CACHED;
12857 	u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20)));
12858 #ifdef CONFIG_SMP
12859 	raw_spin_lock_init(&cfs_rq->removed.lock);
12860 #endif
12861 }
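/*
 * Note: (u64)(-(1LL << 20)) places the initial min_vruntime just below
 * the u64 wrap point, presumably so that the signed-overflow handling
 * in vruntime comparisons gets exercised soon after a cfs_rq starts
 * running instead of only when the counter naturally wraps.
 */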
12862 
12863 #ifdef CONFIG_FAIR_GROUP_SCHED
12864 static void task_change_group_fair(struct task_struct *p)
12865 {
12866 	/*
12867 	 * We can't detach or attach a forked task that
12868 	 * hasn't been woken up by wake_up_new_task() yet.
12869 	 */
12870 	if (READ_ONCE(p->__state) == TASK_NEW)
12871 		return;
12872 
12873 	detach_task_cfs_rq(p);
12874 
12875 #ifdef CONFIG_SMP
12876 	/* Tell se's cfs_rq has been changed -- migrated */
12877 	p->se.avg.last_update_time = 0;
12878 #endif
12879 	set_task_rq(p, task_cpu(p));
12880 	attach_task_cfs_rq(p);
12881 }
12882 
12883 void free_fair_sched_group(struct task_group *tg)
12884 {
12885 	int i;
12886 
12887 	for_each_possible_cpu(i) {
12888 		if (tg->cfs_rq)
12889 			kfree(tg->cfs_rq[i]);
12890 		if (tg->se)
12891 			kfree(tg->se[i]);
12892 	}
12893 
12894 	kfree(tg->cfs_rq);
12895 	kfree(tg->se);
12896 }
12897 
12898 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
12899 {
12900 	struct sched_entity *se;
12901 	struct cfs_rq *cfs_rq;
12902 	int i;
12903 
12904 	tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
12905 	if (!tg->cfs_rq)
12906 		goto err;
12907 	tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
12908 	if (!tg->se)
12909 		goto err;
12910 
12911 	tg->shares = NICE_0_LOAD;
12912 
12913 	init_cfs_bandwidth(tg_cfs_bandwidth(tg), tg_cfs_bandwidth(parent));
12914 
12915 	for_each_possible_cpu(i) {
12916 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
12917 				      GFP_KERNEL, cpu_to_node(i));
12918 		if (!cfs_rq)
12919 			goto err;
12920 
12921 		se = kzalloc_node(sizeof(struct sched_entity_stats),
12922 				  GFP_KERNEL, cpu_to_node(i));
12923 		if (!se)
12924 			goto err_free_rq;
12925 
12926 		init_cfs_rq(cfs_rq);
12927 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
12928 		init_entity_runnable_average(se);
12929 	}
12930 
12931 	return 1;
12932 
12933 err_free_rq:
12934 	kfree(cfs_rq);
12935 err:
12936 	return 0;
12937 }
12938 
12939 void online_fair_sched_group(struct task_group *tg)
12940 {
12941 	struct sched_entity *se;
12942 	struct rq_flags rf;
12943 	struct rq *rq;
12944 	int i;
12945 
12946 	for_each_possible_cpu(i) {
12947 		rq = cpu_rq(i);
12948 		se = tg->se[i];
12949 		rq_lock_irq(rq, &rf);
12950 		update_rq_clock(rq);
12951 		attach_entity_cfs_rq(se);
12952 		sync_throttle(tg, i);
12953 		rq_unlock_irq(rq, &rf);
12954 	}
12955 }
12956 
12957 void unregister_fair_sched_group(struct task_group *tg)
12958 {
12959 	unsigned long flags;
12960 	struct rq *rq;
12961 	int cpu;
12962 
12963 	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
12964 
12965 	for_each_possible_cpu(cpu) {
12966 		if (tg->se[cpu])
12967 			remove_entity_load_avg(tg->se[cpu]);
12968 
12969 		/*
12970 		 * Only empty task groups can be destroyed; so we can speculatively
12971 		 * check on_list without danger of it being re-added.
12972 		 */
12973 		if (!tg->cfs_rq[cpu]->on_list)
12974 			continue;
12975 
12976 		rq = cpu_rq(cpu);
12977 
12978 		raw_spin_rq_lock_irqsave(rq, flags);
12979 		list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
12980 		raw_spin_rq_unlock_irqrestore(rq, flags);
12981 	}
12982 }
12983 
12984 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
12985 			struct sched_entity *se, int cpu,
12986 			struct sched_entity *parent)
12987 {
12988 	struct rq *rq = cpu_rq(cpu);
12989 
12990 	cfs_rq->tg = tg;
12991 	cfs_rq->rq = rq;
12992 	init_cfs_rq_runtime(cfs_rq);
12993 
12994 	tg->cfs_rq[cpu] = cfs_rq;
12995 	tg->se[cpu] = se;
12996 
12997 	/* se could be NULL for root_task_group */
12998 	if (!se)
12999 		return;
13000 
13001 	if (!parent) {
13002 		se->cfs_rq = &rq->cfs;
13003 		se->depth = 0;
13004 	} else {
13005 		se->cfs_rq = parent->my_q;
13006 		se->depth = parent->depth + 1;
13007 	}
13008 
13009 	se->my_q = cfs_rq;
13010 	/* guarantee group entities always have weight */
13011 	update_load_set(&se->load, NICE_0_LOAD);
13012 	se->parent = parent;
13013 }
13014 
13015 static DEFINE_MUTEX(shares_mutex);
13016 
13017 static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
13018 {
13019 	int i;
13020 
13021 	lockdep_assert_held(&shares_mutex);
13022 
13023 	/*
13024 	 * We can't change the weight of the root cgroup.
13025 	 */
13026 	if (!tg->se[0])
13027 		return -EINVAL;
13028 
13029 	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
13030 
13031 	if (tg->shares == shares)
13032 		return 0;
13033 
13034 	tg->shares = shares;
13035 	for_each_possible_cpu(i) {
13036 		struct rq *rq = cpu_rq(i);
13037 		struct sched_entity *se = tg->se[i];
13038 		struct rq_flags rf;
13039 
13040 		/* Propagate contribution to hierarchy */
13041 		rq_lock_irqsave(rq, &rf);
13042 		update_rq_clock(rq);
13043 		for_each_sched_entity(se) {
13044 			update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
13045 			update_cfs_group(se);
13046 		}
13047 		rq_unlock_irqrestore(rq, &rf);
13048 	}
13049 
13050 	return 0;
13051 }
13052 
13053 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
13054 {
13055 	int ret;
13056 
13057 	mutex_lock(&shares_mutex);
13058 	if (tg_is_idle(tg))
13059 		ret = -EINVAL;
13060 	else
13061 		ret = __sched_group_set_shares(tg, shares);
13062 	mutex_unlock(&shares_mutex);
13063 
13064 	return ret;
13065 }
13066 
13067 int sched_group_set_idle(struct task_group *tg, long idle)
13068 {
13069 	int i;
13070 
13071 	if (tg == &root_task_group)
13072 		return -EINVAL;
13073 
13074 	if (idle < 0 || idle > 1)
13075 		return -EINVAL;
13076 
13077 	mutex_lock(&shares_mutex);
13078 
13079 	if (tg->idle == idle) {
13080 		mutex_unlock(&shares_mutex);
13081 		return 0;
13082 	}
13083 
13084 	tg->idle = idle;
13085 
13086 	for_each_possible_cpu(i) {
13087 		struct rq *rq = cpu_rq(i);
13088 		struct sched_entity *se = tg->se[i];
13089 		struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i];
13090 		bool was_idle = cfs_rq_is_idle(grp_cfs_rq);
13091 		long idle_task_delta;
13092 		struct rq_flags rf;
13093 
13094 		rq_lock_irqsave(rq, &rf);
13095 
13096 		grp_cfs_rq->idle = idle;
13097 		if (WARN_ON_ONCE(was_idle == cfs_rq_is_idle(grp_cfs_rq)))
13098 			goto next_cpu;
13099 
13100 		if (se->on_rq) {
13101 			parent_cfs_rq = cfs_rq_of(se);
13102 			if (cfs_rq_is_idle(grp_cfs_rq))
13103 				parent_cfs_rq->idle_nr_running++;
13104 			else
13105 				parent_cfs_rq->idle_nr_running--;
13106 		}
13107 
13108 		idle_task_delta = grp_cfs_rq->h_nr_running -
13109 				  grp_cfs_rq->idle_h_nr_running;
13110 		if (!cfs_rq_is_idle(grp_cfs_rq))
13111 			idle_task_delta *= -1;
13112 
13113 		for_each_sched_entity(se) {
13114 			struct cfs_rq *cfs_rq = cfs_rq_of(se);
13115 
13116 			if (!se->on_rq)
13117 				break;
13118 
13119 			cfs_rq->idle_h_nr_running += idle_task_delta;
13120 
13121 			/* Already accounted at parent level and above. */
13122 			if (cfs_rq_is_idle(cfs_rq))
13123 				break;
13124 		}
13125 
13126 next_cpu:
13127 		rq_unlock_irqrestore(rq, &rf);
13128 	}
13129 
13130 	/* Idle groups have minimum weight. */
13131 	if (tg_is_idle(tg))
13132 		__sched_group_set_shares(tg, scale_load(WEIGHT_IDLEPRIO));
13133 	else
13134 		__sched_group_set_shares(tg, NICE_0_LOAD);
13135 
13136 	mutex_unlock(&shares_mutex);
13137 	return 0;
13138 }
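/*
 * Example of the idle_task_delta accounting above: a group cfs_rq with
 * h_nr_running == 5 and idle_h_nr_running == 2 that has just been made
 * idle adds 5 - 2 = 3 to idle_h_nr_running at each ancestor level; had
 * it just been made non-idle, the same delta would be applied with the
 * opposite sign.
 */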
13139 
13140 #else /* CONFIG_FAIR_GROUP_SCHED */
13141 
13142 void free_fair_sched_group(struct task_group *tg) { }
13143 
13144 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
13145 {
13146 	return 1;
13147 }
13148 
13149 void online_fair_sched_group(struct task_group *tg) { }
13150 
13151 void unregister_fair_sched_group(struct task_group *tg) { }
13152 
13153 #endif /* CONFIG_FAIR_GROUP_SCHED */
13154 
13155 
13156 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
13157 {
13158 	struct sched_entity *se = &task->se;
13159 	unsigned int rr_interval = 0;
13160 
13161 	/*
13162 	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
13163 	 * idle runqueue:
13164 	 */
13165 	if (rq->cfs.load.weight)
13166 		rr_interval = NS_TO_JIFFIES(se->slice);
13167 
13168 	return rr_interval;
13169 }
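/*
 * For example, with se->slice == 3000000ns and HZ == 1000 this reports
 * NS_TO_JIFFIES(3000000) == 3 jiffies (3ms) as the round-robin
 * interval for a task on a busy runqueue, as seen through
 * sched_rr_get_interval().
 */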
13170 
13171 /*
13172  * All the scheduling class methods:
13173  */
13174 DEFINE_SCHED_CLASS(fair) = {
13175 
13176 	.enqueue_task		= enqueue_task_fair,
13177 	.dequeue_task		= dequeue_task_fair,
13178 	.yield_task		= yield_task_fair,
13179 	.yield_to_task		= yield_to_task_fair,
13180 
13181 	.check_preempt_curr	= check_preempt_wakeup,
13182 
13183 	.pick_next_task		= __pick_next_task_fair,
13184 	.put_prev_task		= put_prev_task_fair,
13185 	.set_next_task          = set_next_task_fair,
13186 
13187 #ifdef CONFIG_SMP
13188 	.balance		= balance_fair,
13189 	.pick_task		= pick_task_fair,
13190 	.select_task_rq		= select_task_rq_fair,
13191 	.migrate_task_rq	= migrate_task_rq_fair,
13192 
13193 	.rq_online		= rq_online_fair,
13194 	.rq_offline		= rq_offline_fair,
13195 
13196 	.task_dead		= task_dead_fair,
13197 	.set_cpus_allowed	= set_cpus_allowed_common,
13198 #endif
13199 
13200 	.task_tick		= task_tick_fair,
13201 	.task_fork		= task_fork_fair,
13202 
13203 	.prio_changed		= prio_changed_fair,
13204 	.switched_from		= switched_from_fair,
13205 	.switched_to		= switched_to_fair,
13206 
13207 	.get_rr_interval	= get_rr_interval_fair,
13208 
13209 	.update_curr		= update_curr_fair,
13210 
13211 #ifdef CONFIG_FAIR_GROUP_SCHED
13212 	.task_change_group	= task_change_group_fair,
13213 #endif
13214 
13215 #ifdef CONFIG_SCHED_CORE
13216 	.task_is_throttled	= task_is_throttled_fair,
13217 #endif
13218 
13219 #ifdef CONFIG_UCLAMP_TASK
13220 	.uclamp_enabled		= 1,
13221 #endif
13222 };
13223 
13224 #ifdef CONFIG_SCHED_DEBUG
13225 void print_cfs_stats(struct seq_file *m, int cpu)
13226 {
13227 	struct cfs_rq *cfs_rq, *pos;
13228 
13229 	rcu_read_lock();
13230 	for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
13231 		print_cfs_rq(m, cpu, cfs_rq);
13232 	rcu_read_unlock();
13233 }
13234 
13235 #ifdef CONFIG_NUMA_BALANCING
13236 void show_numa_stats(struct task_struct *p, struct seq_file *m)
13237 {
13238 	int node;
13239 	unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
13240 	struct numa_group *ng;
13241 
13242 	rcu_read_lock();
13243 	ng = rcu_dereference(p->numa_group);
13244 	for_each_online_node(node) {
13245 		if (p->numa_faults) {
13246 			tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
13247 			tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
13248 		}
13249 		if (ng) {
13250 			gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
13251 			gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
13252 		}
13253 		print_numa_stats(m, node, tsf, tpf, gsf, gpf);
13254 	}
13255 	rcu_read_unlock();
13256 }
13257 #endif /* CONFIG_NUMA_BALANCING */
13258 #endif /* CONFIG_SCHED_DEBUG */
13259 
13260 __init void init_sched_fair_class(void)
13261 {
13262 #ifdef CONFIG_SMP
13263 	int i;
13264 
13265 	for_each_possible_cpu(i) {
13266 		zalloc_cpumask_var_node(&per_cpu(load_balance_mask, i), GFP_KERNEL, cpu_to_node(i));
13267 		zalloc_cpumask_var_node(&per_cpu(select_rq_mask,    i), GFP_KERNEL, cpu_to_node(i));
13268 		zalloc_cpumask_var_node(&per_cpu(should_we_balance_tmpmask, i),
13269 					GFP_KERNEL, cpu_to_node(i));
13270 
13271 #ifdef CONFIG_CFS_BANDWIDTH
13272 		INIT_CSD(&cpu_rq(i)->cfsb_csd, __cfsb_csd_unthrottle, cpu_rq(i));
13273 		INIT_LIST_HEAD(&cpu_rq(i)->cfsb_csd_list);
13274 #endif
13275 	}
13276 
13277 	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
13278 
13279 #ifdef CONFIG_NO_HZ_COMMON
13280 	nohz.next_balance = jiffies;
13281 	nohz.next_blocked = jiffies;
13282 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
13283 #endif
13284 #endif /* SMP */
13285 
13286 }
13287