// SPDX-License-Identifier: GPL-2.0
/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 */
#include "sched.h"
#include "walt.h"
#include "rtg/rtg.h"

#ifdef CONFIG_SCHED_WALT
static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p, u16 updated_demand_scaled);
#endif

#if defined(CONFIG_SCHED_WALT) && defined(CONFIG_CFS_BANDWIDTH)
static void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq);
static void walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p);
static void walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p);
static void walt_inc_throttled_cfs_rq_stats(struct walt_sched_stats *stats, struct cfs_rq *cfs_rq);
static void walt_dec_throttled_cfs_rq_stats(struct walt_sched_stats *stats, struct cfs_rq *cfs_rq);
#else
static inline void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq)
{
}
static inline void walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p)
{
}
static inline void walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p)
{
}

#define walt_inc_throttled_cfs_rq_stats(...)
#define walt_dec_throttled_cfs_rq_stats(...)

#endif

#define FAIR_THREE 3
#define FAIR_FOUR 4
#define FAIR_TWENTY 20
#define FAIR_ONEHUNDRED 100
#define FAIR_TWOHUNDREDFIFTYTHREE 253
#define FAIR_TWOHUNDREDFIFTYSIX 256
#define FAIR_ONETHOUSAND 1000
#define FAIR_SIXTYTHOUSAND 60000

/*
 * Targeted preemption latency for CPU-bound tasks:
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 *
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
static unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 *
 * Options are:
 *
 *   SCHED_TUNABLESCALING_NONE - unscaled, always *1
 *   SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 *   SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 *
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 *
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
EXPORT_SYMBOL_GPL(sysctl_sched_min_granularity);
static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 *
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
{
    int _shift = 0;

    if (kstrtoint(str, 0, &_shift)) {
        pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n");
    }

    sched_thermal_decay_shift = clamp(_shift, 0, 10);
    return 1;
}
__setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);

#ifdef CONFIG_SMP
/*
 * For asym packing, by default the lower numbered CPU has higher priority.
 */
int __weak arch_asym_cpu_priority(int cpu)
{
    return -cpu;
}

/*
 * The margin used when comparing utilization with CPU capacity.
 *
 * (default: ~20%)
 */
#define fits_capacity(cap, max) ((cap)*1280 < (max)*1024)
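
/*
 * Worked example: fits_capacity(800, 1024) evaluates
 * 800 * 1280 = 1024000 < 1024 * 1024 = 1048576, i.e. a utilization of 800
 * "fits" a capacity of 1024. The 1280/1024 factor means utilization must
 * stay below 1024/1280 = 80% of capacity, which is where the ~20% margin
 * in the comment above comes from.
 */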

#endif

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * (default: 5 msec, units: microseconds)
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
    lw->weight += inc;
    lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
    lw->weight -= dec;
    lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
    lw->weight = w;
    lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static unsigned int get_update_sysctl_factor(void)
{
    unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
    unsigned int factor;

    switch (sysctl_sched_tunable_scaling) {
        case SCHED_TUNABLESCALING_NONE:
            factor = 1;
            break;
        case SCHED_TUNABLESCALING_LINEAR:
            factor = cpus;
            break;
        case SCHED_TUNABLESCALING_LOG:
        default:
            factor = 1 + ilog2(cpus);
            break;
    }

    return factor;
}
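
/*
 * Worked example: with the default SCHED_TUNABLESCALING_LOG and 8 or more
 * online CPUs, cpus is capped at 8 and factor = 1 + ilog2(8) = 4, so the
 * effective sysctl_sched_latency becomes 4 * 6ms = 24ms and
 * sysctl_sched_min_granularity becomes 4 * 0.75ms = 3ms.
 */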

static void update_sysctl(void)
{
    unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
    (sysctl_##name = (factor) * normalized_sysctl_##name)
    SET_SYSCTL(sched_min_granularity);
    SET_SYSCTL(sched_latency);
    SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void __init sched_init_granularity(void)
{
    update_sysctl();
}

#define WMULT_CONST (~0U)
#define WMULT_SHIFT 32

static void fair_update_inv_weight(struct load_weight *lw)
{
    unsigned long w;

    if (likely(lw->inv_weight)) {
        return;
    }

    w = scale_load_down(lw->weight);
    if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) {
        lw->inv_weight = 1;
    } else if (unlikely(!w)) {
        lw->inv_weight = WMULT_CONST;
    } else {
        lw->inv_weight = WMULT_CONST / w;
    }
}

/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
 * we're guaranteed shift stays positive because inv_weight is guaranteed to
 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
 *
 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
 * weight/lw.weight <= 1, and therefore our shift will also be positive.
 */
static u64 fair_calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
    u64 fact = scale_load_down(weight);
    int shift = WMULT_SHIFT;

    fair_update_inv_weight(lw);

    if (unlikely(fact >> 32)) {
        while (fact >> 32) {
            fact >>= 1;
            shift--;
        }
    }

    fact = mul_u32_u32(fact, lw->inv_weight);

    while (fact >> 32) {
        fact >>= 1;
        shift--;
    }

    return mul_u64_u32_shr(delta_exec, fact, shift);
}
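
/*
 * Worked example: for weight == lw->weight (e.g. one nice-0 task on a
 * runqueue whose load equals a single nice-0 task), inv_weight ~= 2^32 / w
 * and fact ~= 2^32, so the result is delta_exec * 2^32 >> 32 ~= delta_exec:
 * the division reduces to an identity, computed with one multiply and one
 * shift instead of a 64-bit division.
 */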

const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED
static inline struct task_struct *task_of(struct sched_entity *se)
{
    SCHED_WARN_ON(!entity_is_task(se));
    return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
    return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
    return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
    return grp->my_q;
}

static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
{
    if (!path) {
        return;
    }

    if (cfs_rq && task_group_is_autogroup(cfs_rq->tg)) {
        autogroup_path(cfs_rq->tg, path, len);
    } else if (cfs_rq && cfs_rq->tg->css.cgroup) {
        cgroup_path(cfs_rq->tg->css.cgroup, path, len);
    } else {
        strlcpy(path, "(null)", len);
    }
}

static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
    struct rq *rq = rq_of(cfs_rq);
    int cpu = cpu_of(rq);

    if (cfs_rq->on_list) {
        return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
    }

    cfs_rq->on_list = 1;

    /*
     * Ensure we either appear before our parent (if already
     * enqueued) or force our parent to appear after us when it is
     * enqueued. The fact that we always enqueue bottom-up
     * reduces this to two cases and a special case for the root
     * cfs_rq. Furthermore, it also means that we will always reset
     * tmp_alone_branch either when the branch is connected
     * to a tree or when we reach the top of the tree.
     */
    if (cfs_rq->tg->parent && cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
        /*
         * If the parent is already on the list, we add the child
         * just before. Thanks to the circular linked property of
         * the list, this means putting the child at the tail
         * of the list that starts at the parent.
         */
        list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
        /*
         * The branch is now connected to its tree so we can
         * reset tmp_alone_branch to the beginning of the
         * list.
         */
        rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
        return true;
    }

    if (!cfs_rq->tg->parent) {
        /*
         * A cfs_rq without a parent should be put
         * at the tail of the list.
         */
        list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
        /*
         * We have reached the top of a tree so we can reset
         * tmp_alone_branch to the beginning of the list.
         */
        rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
        return true;
    }

    /*
     * The parent has not already been added so we want to
     * make sure that it will be put after us.
     * tmp_alone_branch points to the beginning of the branch
     * where we will add the parent.
     */
    list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
    /*
     * Update tmp_alone_branch to point to the new beginning
     * of the branch.
     */
    rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
    return false;
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
    if (cfs_rq->on_list) {
        struct rq *rq = rq_of(cfs_rq);

        /*
         * With cfs_rq being unthrottled/throttled during an enqueue,
         * it can happen that tmp_alone_branch points to a leaf that
         * we finally want to delete. In this case, tmp_alone_branch moves
         * to the prev element but it will point to rq->leaf_cfs_rq_list
         * at the end of the enqueue.
         */
        if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list) {
            rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
        }

        list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
        cfs_rq->on_list = 0;
    }
}

static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
    SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)                                                                     \
    list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline struct cfs_rq *is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
    if (se->cfs_rq == pse->cfs_rq) {
        return se->cfs_rq;
    }

    return NULL;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
    return se->parent;
}

static void find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
    int se_depth, pse_depth;

    /*
     * The preemption test can be made between sibling entities that are in
     * the same cfs_rq, i.e. that have a common parent. Walk up the
     * hierarchy of both tasks until we find their ancestors that are
     * siblings of a common parent.
     */

    /* First walk up until both entities are at same depth */
    se_depth = (*se)->depth;
    pse_depth = (*pse)->depth;

    while (se_depth > pse_depth) {
        se_depth--;
        *se = parent_entity(*se);
    }

    while (pse_depth > se_depth) {
        pse_depth--;
        *pse = parent_entity(*pse);
    }

    while (!is_same_group(*se, *pse)) {
        *se = parent_entity(*se);
        *pse = parent_entity(*pse);
    }
}

#else /* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
    return container_of(se, struct task_struct, se);
}

#define for_each_sched_entity(se) for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
    return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
    struct task_struct *p = task_of(se);
    struct rq *rq = task_rq(p);

    return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
    return NULL;
}

static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
{
    if (path) {
        strlcpy(path, "(null)", len);
    }
}

static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
    return true;
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
}

#define for_each_leaf_cfs_rq_safe(rq, cfs, pos) for ((cfs) = &(rq)->cfs, (pos) = NULL; (cfs); (cfs) = (pos))

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
    return NULL;
}

static inline void find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif /* CONFIG_FAIR_GROUP_SCHED */

static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
    s64 delta = (s64)(vruntime - max_vruntime);
    if (delta > 0) {
        max_vruntime = vruntime;
    }

    return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
    s64 delta = (s64)(vruntime - min_vruntime);
    if (delta < 0) {
        min_vruntime = vruntime;
    }

    return min_vruntime;
}

static inline int entity_before(struct sched_entity *a, struct sched_entity *b)
{
    return (s64)(a->vruntime - b->vruntime) < 0;
}
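
/*
 * Note: the three helpers above compare vruntimes via a signed difference
 * rather than with '<' directly, so the ordering stays correct even after
 * the u64 vruntime counters wrap around, as long as the two values are
 * within 2^63 of each other.
 */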

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
    struct sched_entity *curr = cfs_rq->curr;
    struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);

    u64 vruntime = cfs_rq->min_vruntime;

    if (curr) {
        if (curr->on_rq) {
            vruntime = curr->vruntime;
        } else {
            curr = NULL;
        }
    }

    if (leftmost) { /* non-empty tree */
        struct sched_entity *se;
        se = rb_entry(leftmost, struct sched_entity, run_node);

        if (!curr) {
            vruntime = se->vruntime;
        } else {
            vruntime = min_vruntime(vruntime, se->vruntime);
        }
    }

    /* ensure we never gain time by being placed backwards. */
    cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
    smp_wmb();
    cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void fair_enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
    struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node;
    struct rb_node *parent = NULL;
    struct sched_entity *entry;
    bool leftmost = true;

    /*
     * Find the right place in the rbtree:
     */
    while (*link) {
        parent = *link;
        entry = rb_entry(parent, struct sched_entity, run_node);
        /*
         * We don't care about collisions. Nodes with
         * the same key stay together.
         */
        if (entity_before(se, entry)) {
            link = &parent->rb_left;
        } else {
            link = &parent->rb_right;
            leftmost = false;
        }
    }

    rb_link_node(&se->run_node, parent, link);
    rb_insert_color_cached(&se->run_node, &cfs_rq->tasks_timeline, leftmost);
}

static void fair_dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
    rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
    struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);

    if (!left) {
        return NULL;
    }

    return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *fair_pick_next_entity(struct sched_entity *se)
{
    struct rb_node *next = rb_next(&se->run_node);

    if (!next) {
        return NULL;
    }

    return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
    struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);

    if (!last) {
        return NULL;
    }

    return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods
 */

int sched_proc_update_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos)
{
    int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
    unsigned int factor = get_update_sysctl_factor();

    if (ret || !write) {
        return ret;
    }

    sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency, sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) (normalized_sysctl_##name = sysctl_##name / (factor))
    WRT_SYSCTL(sched_min_granularity);
    WRT_SYSCTL(sched_latency);
    WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

    return 0;
}
#endif

/*
 * delta /= w
 */
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
    if (unlikely(se->load.weight != NICE_0_LOAD)) {
        delta = fair_calc_delta(delta, NICE_0_LOAD, &se->load);
    }

    return delta;
}
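
/*
 * Note: a nice-0 task takes the fast path above and accrues vruntime 1:1
 * with wall-clock runtime. A heavier (lower nice) entity accrues vruntime
 * more slowly and a lighter one faster; e.g. with twice NICE_0_LOAD, 1ms
 * of runtime advances vruntime by only ~0.5ms.
 */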

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 fair_sched_period(unsigned long nr_running)
{
    if (unlikely(nr_running > sched_nr_latency)) {
        return nr_running * sysctl_sched_min_granularity;
    } else {
        return sysctl_sched_latency;
    }
}
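
/*
 * Worked example with the (unscaled) defaults, latency = 6ms,
 * min_granularity = 0.75ms, sched_nr_latency = 8: with up to 8 runnable
 * tasks the period stays 6ms; with 12 tasks it stretches to
 * 12 * 0.75ms = 9ms so that no slice drops below the minimum granularity.
 */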

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
    unsigned int nr_running = cfs_rq->nr_running;
    u64 slice;

    if (sched_feat(ALT_PERIOD)) {
        nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
    }

    slice = fair_sched_period(nr_running + !se->on_rq);

    for_each_sched_entity(se) {
        struct load_weight *load;
        struct load_weight lw;

        cfs_rq = cfs_rq_of(se);
        load = &cfs_rq->load;

        if (unlikely(!se->on_rq)) {
            lw = cfs_rq->load;

            update_load_add(&lw, se->load.weight);
            load = &lw;
        }
        slice = fair_calc_delta(slice, se->load.weight, load);
    }

    if (sched_feat(BASE_SLICE)) {
        slice = max(slice, (u64)sysctl_sched_min_granularity);
    }

    return slice;
}
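
/*
 * Worked example: two runnable nice-0 tasks and a 6ms period give each
 * task a slice of 6ms * (w / 2w) = 3ms. At each hierarchy level the slice
 * is scaled by this entity's share of the cfs_rq load, so a group entity
 * with 25% of its parent's weight passes only 25% of the period down.
 */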

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
    return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

#include "pelt.h"
#ifdef CONFIG_SMP

static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
static unsigned long task_h_load(struct task_struct *p);

/* Give a new sched_entity initial runnable values so its load is seen as heavy in its infancy */
void init_entity_runnable_average(struct sched_entity *se)
{
    struct sched_avg *sa = &se->avg;

    memset(sa, 0, sizeof(*sa));

    /*
     * Tasks are initialized with full load to be seen as heavy tasks until
     * they get a chance to stabilize to their real load level.
     * Group entities are initialized with zero load to reflect the fact that
     * nothing has been attached to the task group yet.
     */
    if (entity_is_task(se)) {
        sa->load_avg = scale_load_down(se->load.weight);
    }

    /* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
}

static void attach_entity_cfs_rq(struct sched_entity *se);

/*
 * With new tasks being created, their initial util_avgs are extrapolated
 * based on the cfs_rq's current util_avg:
 *
 *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
 *
 * However, in many cases, the above util_avg does not give a desired
 * value. Moreover, the sum of the util_avgs may be divergent, such
 * as when the series is a harmonic series.
 *
 * To solve this problem, we also cap the util_avg of successive tasks to
 * only 1/2 of the left utilization budget:
 *
 *   util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
 *
 * where n denotes the nth task and cpu_scale the CPU capacity.
 *
 * For example, for a CPU with 1024 of capacity, the simplest series from
 * the beginning would be like
 *
 *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
 *
 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
 * if util_avg > util_avg_cap.
 */
void post_init_entity_util_avg(struct task_struct *p)
{
    struct sched_entity *se = &p->se;
    struct cfs_rq *cfs_rq = cfs_rq_of(se);
    struct sched_avg *sa = &se->avg;
    long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
    long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;

    if (cap > 0) {
        if (cfs_rq->avg.util_avg != 0) {
            sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
            sa->util_avg /= (cfs_rq->avg.load_avg + 1);

            if (sa->util_avg > cap) {
                sa->util_avg = cap;
            }
        } else {
            sa->util_avg = cap;
        }
    }

    sa->runnable_avg = sa->util_avg;

    if (p->sched_class != &fair_sched_class) {
        /*
         * For !fair tasks do:
         *
         *  update_cfs_rq_load_avg(now, cfs_rq);
         *  attach_entity_load_avg(cfs_rq, se);
         *  switched_from_fair(rq, p);
         *
         * such that the next switched_to_fair() has the
         * expected state.
         */
        se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
        return;
    }

    attach_entity_cfs_rq(se);
}

#else  /* !CONFIG_SMP */
void init_entity_runnable_average(struct sched_entity *se)
{
}
void post_init_entity_util_avg(struct task_struct *p)
{
}
static void update_tg_load_avg(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics.
 */
static void update_curr(struct cfs_rq *cfs_rq)
{
    struct sched_entity *curr = cfs_rq->curr;
    u64 now = rq_clock_task(rq_of(cfs_rq));
    u64 delta_exec;

    if (unlikely(!curr)) {
        return;
    }

    delta_exec = now - curr->exec_start;
    if (unlikely((s64)delta_exec <= 0)) {
        return;
    }

    curr->exec_start = now;

    schedstat_set(curr->statistics.exec_max, max(delta_exec, curr->statistics.exec_max));

    curr->sum_exec_runtime += delta_exec;
    schedstat_add(cfs_rq->exec_clock, delta_exec);

    curr->vruntime += calc_delta_fair(delta_exec, curr);
    update_min_vruntime(cfs_rq);

    if (entity_is_task(curr)) {
        struct task_struct *curtask = task_of(curr);

        trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
        cgroup_account_cputime(curtask, delta_exec);
        account_group_exec_runtime(curtask, delta_exec);
    }

    account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static void update_curr_fair(struct rq *rq)
{
    update_curr(cfs_rq_of(&rq->curr->se));
}

static inline void update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
    u64 wait_start, prev_wait_start;

    if (!schedstat_enabled()) {
        return;
    }

    wait_start = rq_clock(rq_of(cfs_rq));
    prev_wait_start = schedstat_val(se->statistics.wait_start);
    if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) && likely(wait_start > prev_wait_start)) {
        wait_start -= prev_wait_start;
    }

    __schedstat_set(se->statistics.wait_start, wait_start);
}

static inline void update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
    struct task_struct *p;
    u64 delta;

    if (!schedstat_enabled()) {
        return;
    }

    delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);

    if (entity_is_task(se)) {
        p = task_of(se);
        if (task_on_rq_migrating(p)) {
            /*
             * Preserve migrating task's wait time so wait_start
             * time stamp can be adjusted to accumulate wait time
             * prior to migration.
             */
            __schedstat_set(se->statistics.wait_start, delta);
            return;
        }
        trace_sched_stat_wait(p, delta);
    }

    __schedstat_set(se->statistics.wait_max, max(schedstat_val(se->statistics.wait_max), delta));
    __schedstat_inc(se->statistics.wait_count);
    __schedstat_add(se->statistics.wait_sum, delta);
    __schedstat_set(se->statistics.wait_start, 0);
}

static inline void update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
    struct task_struct *tsk = NULL;
    u64 sleep_start, block_start;

    if (!schedstat_enabled()) {
        return;
    }

    sleep_start = schedstat_val(se->statistics.sleep_start);
    block_start = schedstat_val(se->statistics.block_start);

    if (entity_is_task(se)) {
        tsk = task_of(se);
    }

    if (sleep_start) {
        u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;
        if ((s64)delta < 0) {
            delta = 0;
        }

        if (unlikely(delta > schedstat_val(se->statistics.sleep_max))) {
            __schedstat_set(se->statistics.sleep_max, delta);
        }

        __schedstat_set(se->statistics.sleep_start, 0);
        __schedstat_add(se->statistics.sum_sleep_runtime, delta);

        if (tsk) {
            account_scheduler_latency(tsk, delta >> 10, 1);
            trace_sched_stat_sleep(tsk, delta);
        }
    }
    if (block_start) {
        u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;
        if ((s64)delta < 0) {
            delta = 0;
        }

        if (unlikely(delta > schedstat_val(se->statistics.block_max))) {
            __schedstat_set(se->statistics.block_max, delta);
        }

        __schedstat_set(se->statistics.block_start, 0);
        __schedstat_add(se->statistics.sum_sleep_runtime, delta);

        if (tsk) {
            if (tsk->in_iowait) {
                __schedstat_add(se->statistics.iowait_sum, delta);
                __schedstat_inc(se->statistics.iowait_count);
                trace_sched_stat_iowait(tsk, delta);
            }

            trace_sched_stat_blocked(tsk, delta);

            /*
             * Blocking time is in units of nanosecs, so shift by
             * 20 to get a milliseconds-range estimation of the
             * amount of time that the task spent sleeping:
             */
            if (unlikely(prof_on == SLEEP_PROFILING)) {
                profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk), delta >> FAIR_TWENTY);
            }
            account_scheduler_latency(tsk, delta >> 10, 0);
        }
    }
}

/*
 * Task is being enqueued - update stats:
 */
static inline void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
    if (!schedstat_enabled()) {
        return;
    }

    /*
     * Are we enqueueing a waiting task? (for current tasks
     * a dequeue/enqueue event is a NOP)
     */
    if (se != cfs_rq->curr) {
        update_stats_wait_start(cfs_rq, se);
    }

    if (flags & ENQUEUE_WAKEUP) {
        update_stats_enqueue_sleeper(cfs_rq, se);
    }
}

static inline void update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
    if (!schedstat_enabled()) {
        return;
    }

    /*
     * Mark the end of the wait period if dequeueing a
     * waiting task:
     */
    if (se != cfs_rq->curr) {
        update_stats_wait_end(cfs_rq, se);
    }

    if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
        struct task_struct *tsk = task_of(se);

        if (tsk->state & TASK_INTERRUPTIBLE) {
            __schedstat_set(se->statistics.sleep_start, rq_clock(rq_of(cfs_rq)));
        }
        if (tsk->state & TASK_UNINTERRUPTIBLE) {
            __schedstat_set(se->statistics.block_start, rq_clock(rq_of(cfs_rq)));
        }
    }
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
    /*
     * We are starting a new run period:
     */
    se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the task's virtual memory size and
 * numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = FAIR_ONETHOUSAND;
unsigned int sysctl_numa_balancing_scan_period_max = FAIR_SIXTYTHOUSAND;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = FAIR_TWOHUNDREDFIFTYSIX;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = FAIR_ONETHOUSAND;

struct numa_group {
    refcount_t refcount;

    spinlock_t lock; /* nr_tasks, tasks */
    int nr_tasks;
    pid_t gid;
    int active_nodes;

    struct rcu_head rcu;
    unsigned long total_faults;
    unsigned long max_faults_cpu;
    /*
     * Faults_cpu is used to decide whether memory should move
     * towards the CPU. As a consequence, these stats are weighted
     * more by CPU use than by memory faults.
     */
    unsigned long *faults_cpu;
    unsigned long faults[];
};

/*
 * For functions that can be called in multiple contexts that permit reading
 * ->numa_group (see struct task_struct for locking rules).
 */
static struct numa_group *deref_task_numa_group(struct task_struct *p)
{
    return rcu_dereference_check(p->numa_group,
                                 p == current || (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu)));
}

static struct numa_group *deref_curr_numa_group(struct task_struct *p)
{
    return rcu_dereference_protected(p->numa_group, p == current);
}

static inline unsigned long group_faults_priv(struct numa_group *ng);
static inline unsigned long group_faults_shared(struct numa_group *ng);

static unsigned int task_nr_scan_windows(struct task_struct *p)
{
    unsigned long rss = 0;
    unsigned long nr_scan_pages;

    /*
     * Calculations are based on RSS, as non-present and empty pages are
     * skipped by the PTE scanner and NUMA hinting faults should be trapped
     * based on resident pages.
     */
    nr_scan_pages = sysctl_numa_balancing_scan_size << (FAIR_TWENTY - PAGE_SHIFT);
    rss = get_mm_rss(p->mm);
    if (!rss) {
        rss = nr_scan_pages;
    }

    rss = round_up(rss, nr_scan_pages);
    return rss / nr_scan_pages;
}
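
/*
 * Worked example: with the default scan_size of 256MB and 4KB pages
 * (PAGE_SHIFT = 12), nr_scan_pages = 256 << 8 = 65536 pages per window.
 * A task with 1GB of RSS (262144 pages) therefore needs
 * 262144 / 65536 = 4 scan windows to cover its resident memory.
 */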

/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560

static unsigned int task_scan_min(struct task_struct *p)
{
    unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
    unsigned int scan, floor;
    unsigned int windows = 1;

    if (scan_size < MAX_SCAN_WINDOW) {
        windows = MAX_SCAN_WINDOW / scan_size;
    }
    floor = FAIR_ONETHOUSAND / windows;

    scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
    return max_t(unsigned int, floor, scan);
}
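
/*
 * Worked example: with the default 256MB scan size, windows =
 * 2560 / 256 = 10 and floor = 1000 / 10 = 100ms, which caps the scan rate
 * at MAX_SCAN_WINDOW MB/sec. For the 1GB-RSS task above (4 scan windows),
 * scan = 1000 / 4 = 250ms, so the minimum scan period is 250ms.
 */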

static unsigned int task_scan_start(struct task_struct *p)
{
    unsigned long smin = task_scan_min(p);
    unsigned long period = smin;
    struct numa_group *ng;

    /* Scale the maximum scan period with the amount of shared memory. */
    rcu_read_lock();
    ng = rcu_dereference(p->numa_group);
    if (ng) {
        unsigned long shared = group_faults_shared(ng);
        unsigned long private = group_faults_priv(ng);

        period *= refcount_read(&ng->refcount);
        period *= shared + 1;
        period /= private + shared + 1;
    }
    rcu_read_unlock();

    return max(smin, period);
}

static unsigned int task_scan_max(struct task_struct *p)
{
    unsigned long smin = task_scan_min(p);
    unsigned long smax;
    struct numa_group *ng;

    /* Watch for min being lower than max due to floor calculations */
    smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);

    /* Scale the maximum scan period with the amount of shared memory. */
    ng = deref_curr_numa_group(p);
    if (ng) {
        unsigned long shared = group_faults_shared(ng);
        unsigned long private = group_faults_priv(ng);
        unsigned long period = smax;

        period *= refcount_read(&ng->refcount);
        period *= shared + 1;
        period /= private + shared + 1;

        smax = max(smax, period);
    }

    return max(smin, smax);
}

static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
    rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
    rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}

static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
    rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
    rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}

/* Shared or private faults. */
#define NR_NUMA_HINT_FAULT_TYPES 2

/* Memory and CPU locality */
#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)

/* Averaged statistics, and temporary buffers. */
#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)

pid_t task_numa_group_id(struct task_struct *p)
{
    struct numa_group *ng;
    pid_t gid = 0;

    rcu_read_lock();
    ng = rcu_dereference(p->numa_group);
    if (ng) {
        gid = ng->gid;
    }
    rcu_read_unlock();

    return gid;
}

/*
 * The averaged statistics, shared & private, memory & CPU,
 * occupy the first half of the array. The second half of the
 * array is for current counters, which are averaged into the
 * first set by task_numa_placement.
 */
static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
    return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}
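
/*
 * Worked example: on a 2-node system (nr_node_ids = 2), the NUMA_MEM
 * counters occupy indices 0..3 and the NUMA_CPU counters indices 4..7;
 * e.g. the private (priv = 1) memory faults for nid 1 live at index
 * 2 * (0 * 2 + 1) + 1 = 3.
 */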

static inline unsigned long task_faults(struct task_struct *p, int nid)
{
    if (!p->numa_faults) {
        return 0;
    }

    return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] + p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults(struct task_struct *p, int nid)
{
    struct numa_group *ng = deref_task_numa_group(p);

    if (!ng) {
        return 0;
    }

    return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] + ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
{
    return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] + group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults_priv(struct numa_group *ng)
{
    unsigned long faults = 0;
    int node;

    for_each_online_node(node)
    {
        faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
    }

    return faults;
}

static inline unsigned long group_faults_shared(struct numa_group *ng)
{
    unsigned long faults = 0;
    int node;

    for_each_online_node(node)
    {
        faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
    }

    return faults;
}

/*
 * A node triggering more than 1/3 as many NUMA faults as the maximum is
 * considered part of a numa group's pseudo-interleaving set. Migrations
 * between these nodes are slowed down, to allow things to settle down.
 */
#define ACTIVE_NODE_FRACTION 3

static bool numa_is_active_node(int nid, struct numa_group *ng)
{
    return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
}

/* Handle placement on systems where not all nodes are directly connected. */
static unsigned long score_nearby_nodes(struct task_struct *p, int nid, int maxdist, bool task)
{
    unsigned long score = 0;
    int node;

    /*
     * All nodes are directly connected, and the same distance
     * from each other. No need for fancy placement algorithms.
     */
    if (sched_numa_topology_type == NUMA_DIRECT) {
        return 0;
    }

    /*
     * This code is called for each node, introducing N^2 complexity,
     * which should be ok given the number of nodes rarely exceeds 8.
     */
    for_each_online_node(node)
    {
        unsigned long faults;
        int dist = node_distance(nid, node);
        /*
         * The furthest away nodes in the system are not interesting
         * for placement; nid was already counted.
         */
        if (dist == sched_max_numa_distance || node == nid) {
            continue;
        }

        /*
         * On systems with a backplane NUMA topology, compare groups
         * of nodes, and move tasks towards the group with the most
         * memory accesses. When comparing two nodes at distance
         * "hoplimit", only nodes closer by than "hoplimit" are part
         * of each group. Skip other nodes.
         */
        if (sched_numa_topology_type == NUMA_BACKPLANE && dist >= maxdist) {
            continue;
        }

        /* Add up the faults from nearby nodes. */
        if (task) {
            faults = task_faults(p, node);
        } else {
            faults = group_faults(p, node);
        }

        /*
         * On systems with a glueless mesh NUMA topology, there are
         * no fixed "groups of nodes". Instead, nodes that are not
         * directly connected bounce traffic through intermediate
         * nodes; a numa_group can occupy any set of nodes.
         * The further away a node is, the less the faults count.
         * This seems to result in good task placement.
         */
        if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
            faults *= (sched_max_numa_distance - dist);
            faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
        }

        score += faults;
    }

    return score;
}

/*
 * These return the fraction of accesses done by a particular task, or
 * task group, on a particular numa node.  The group weight is given a
 * larger multiplier, in order to group tasks together that are almost
 * evenly spread out between numa nodes.
 */
static inline unsigned long task_weight(struct task_struct *p, int nid, int dist)
{
    unsigned long faults, total_faults;

    if (!p->numa_faults) {
        return 0;
    }

    total_faults = p->total_numa_faults;

    if (!total_faults) {
        return 0;
    }

    faults = task_faults(p, nid);
    faults += score_nearby_nodes(p, nid, dist, true);

    return FAIR_ONETHOUSAND * faults / total_faults;
}

static inline unsigned long group_weight(struct task_struct *p, int nid, int dist)
{
    struct numa_group *ng = deref_task_numa_group(p);
    unsigned long faults, total_faults;

    if (!ng) {
        return 0;
    }

    total_faults = ng->total_faults;

    if (!total_faults) {
        return 0;
    }

    faults = group_faults(p, nid);
    faults += score_nearby_nodes(p, nid, dist, false);

    return FAIR_ONETHOUSAND * faults / total_faults;
}

bool should_numa_migrate_memory(struct task_struct *p, struct page *page, int src_nid, int dst_cpu)
{
    struct numa_group *ng = deref_curr_numa_group(p);
    int dst_nid = cpu_to_node(dst_cpu);
    int last_cpupid, this_cpupid;

    this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
    last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
    /*
     * Allow first faults or private faults to migrate immediately early in
     * the lifetime of a task. The magic number 4 is based on waiting for
     * two full passes of the "multi-stage node selection" test that is
     * executed below.
     */
    if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= FAIR_FOUR) &&
        (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid))) {
        return true;
    }

    /*
     * Multi-stage node selection is used in conjunction with a periodic
     * migration fault to build a temporal task<->page relation. By using
     * a two-stage filter we remove short/unlikely relations.
     *
     * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
     * a task's usage of a particular page (n_p) per total usage of this
     * page (n_t) (in a given time-span) to a probability.
     *
     * Our periodic faults will sample this probability and getting the
     * same result twice in a row, given these samples are fully
     * independent, is then given by P(n)^2, provided our sample period
     * is sufficiently short compared to the usage pattern.
     *
     * This quadratic squishes small probabilities, making it less likely we
     * act on an unlikely task<->page relation.
     */
    if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != dst_nid) {
        return false;
    }

    /* Always allow migrate on private faults */
    if (cpupid_match_pid(p, last_cpupid)) {
        return true;
    }

    /* A shared fault, but p->numa_group has not been set up yet. */
    if (!ng) {
        return true;
    }

    /*
     * Is the destination node much more heavily used than the source
     * node? If so, allow migration.
     */
    if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) * ACTIVE_NODE_FRACTION) {
        return true;
    }

    /*
     * Distribute memory according to CPU & memory use on each node,
     * with 3/4 hysteresis to avoid unnecessary memory migrations:
     *
     * faults_cpu(dst)   3   faults_cpu(src)
     * --------------- * - > ---------------
     * faults_mem(dst)   4   faults_mem(src)
     */
    return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * FAIR_THREE >
           group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * FAIR_FOUR;
}
1547 
1548 /*
1549  * 'numa_type' describes the node at the moment of load balancing.
1550  */
1551 enum numa_type {
1552     /* The node has spare capacity that can be used to run more tasks.  */
1553     node_has_spare = 0,
1554     /*
1555      * The node is fully used and the tasks don't compete for more CPU
1556      * cycles. Nevertheless, some tasks might wait before running.
1557      */
1558     node_fully_busy,
1559     /*
1560      * The node is overloaded and can't provide expected CPU cycles to all
1561      * tasks.
1562      */
1563     node_overloaded
1564 };
1565 
1566 /* Cached statistics for all CPUs within a node */
1567 struct numa_stats {
1568     unsigned long load;
1569     unsigned long runnable;
1570     unsigned long util;
1571     /* Total compute capacity of CPUs on a node */
1572     unsigned long compute_capacity;
1573     unsigned int nr_running;
1574     unsigned int weight;
1575     enum numa_type node_type;
1576     int idle_cpu;
1577 };
1578 
is_core_idle(int cpu)1579 static inline bool is_core_idle(int cpu)
1580 {
1581 #ifdef CONFIG_SCHED_SMT
1582     int sibling;
1583 
1584     for_each_cpu(sibling, cpu_smt_mask(cpu))
1585     {
1586         if (cpu == sibling) {
1587             continue;
1588         }
1589 
1590         if (!idle_cpu(sibling)) {
1591             return false;
1592         }
1593     }
1594 #endif
1595 
1596     return true;
1597 }
1598 
1599 struct task_numa_env {
1600     struct task_struct *p;
1601 
1602     int src_cpu, src_nid;
1603     int dst_cpu, dst_nid;
1604 
1605     struct numa_stats src_stats, dst_stats;
1606 
1607     int imbalance_pct;
1608     int dist;
1609 
1610     struct task_struct *best_task;
1611     long best_imp;
1612     int best_cpu;
1613 };
1614 
1615 static unsigned long cpu_load(struct rq *rq);
1616 static unsigned long cpu_runnable(struct rq *rq);
1617 static inline long adjust_numa_imbalance(int imbalance, int nr_running);
1618 
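/*
 * Rough worked example (assumed numbers, using the imbalance_pct = 112
 * initializer from task_numa_migrate() below): a node whose runqueues are
 * over-subscribed (nr_running > weight) is node_overloaded once
 * util * 112 > compute_capacity * 100, i.e. utilization above roughly 89%
 * of capacity; the mirrored margins select node_has_spare, and anything
 * in between is node_fully_busy.
 */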
1619 static inline enum numa_type numa_classify(unsigned int imbalance_pct, struct numa_stats *ns)
1620 {
1621     if ((ns->nr_running > ns->weight) &&
1622         (((ns->compute_capacity * FAIR_ONEHUNDRED) < (ns->util * imbalance_pct)) ||
1623          ((ns->compute_capacity * imbalance_pct) < (ns->runnable * FAIR_ONEHUNDRED)))) {
1624         return node_overloaded;
1625     }
1626 
1627     if ((ns->nr_running < ns->weight) ||
1628         (((ns->compute_capacity * FAIR_ONEHUNDRED) > (ns->util * imbalance_pct)) &&
1629          ((ns->compute_capacity * imbalance_pct) > (ns->runnable * FAIR_ONEHUNDRED)))) {
1630         return node_has_spare;
1631     }
1632 
1633     return node_fully_busy;
1634 }
1635 
1636 #ifdef CONFIG_SCHED_SMT
1637 /* Forward declarations of select_idle_sibling helpers */
1638 static inline bool test_idle_cores(int cpu, bool def);
1639 static inline int numa_idle_core(int idle_core, int cpu)
1640 {
1641     if (!static_branch_likely(&sched_smt_present) || idle_core >= 0 || !test_idle_cores(cpu, false)) {
1642         return idle_core;
1643     }
1644 
1645     /*
1646      * Prefer cores instead of packing HT siblings
1647      * and triggering future load balancing.
1648      */
1649     if (is_core_idle(cpu)) {
1650         idle_core = cpu;
1651     }
1652 
1653     return idle_core;
1654 }
1655 #else
1656 static inline int numa_idle_core(int idle_core, int cpu)
1657 {
1658     return idle_core;
1659 }
1660 #endif
1661 
1662 /*
1663  * Gather all necessary information to make NUMA balancing placement
1664  * decisions that are compatible with the standard load balancer. This
1665  * borrows code and logic from update_sg_lb_stats but sharing a
1666  * common implementation is impractical.
1667  */
1668 static void update_numa_stats(struct task_numa_env *env, struct numa_stats *ns, int nid, bool find_idle)
1669 {
1670     int cpu, idle_core = -1;
1671 
1672     memset(ns, 0, sizeof(*ns));
1673     ns->idle_cpu = -1;
1674 
1675     rcu_read_lock();
1676     for_each_cpu(cpu, cpumask_of_node(nid))
1677     {
1678         struct rq *rq = cpu_rq(cpu);
1679 
1680         ns->load += cpu_load(rq);
1681         ns->runnable += cpu_runnable(rq);
1682         ns->util += cpu_util(cpu);
1683         ns->nr_running += rq->cfs.h_nr_running;
1684         ns->compute_capacity += capacity_of(cpu);
1685 
1686         if (find_idle && !rq->nr_running && idle_cpu(cpu)) {
1687             if (READ_ONCE(rq->numa_migrate_on) || !cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
1688                 continue;
1689             }
1690 
1691             if (ns->idle_cpu == -1) {
1692                 ns->idle_cpu = cpu;
1693             }
1694 
1695             idle_core = numa_idle_core(idle_core, cpu);
1696         }
1697     }
1698     rcu_read_unlock();
1699 
1700     ns->weight = cpumask_weight(cpumask_of_node(nid));
1701 
1702     ns->node_type = numa_classify(env->imbalance_pct, ns);
1703 
1704     if (idle_core >= 0) {
1705         ns->idle_cpu = idle_core;
1706     }
1707 }
1708 
1709 static void task_numa_assign(struct task_numa_env *env, struct task_struct *p, long imp)
1710 {
1711     struct rq *rq = cpu_rq(env->dst_cpu);
1712 
1713     /* Check if the run-queue is part of an active NUMA balance. */
1714     if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) {
1715         int cpu;
1716         int start = env->dst_cpu;
1717 
1718         /* Find alternative idle CPU. */
1719         for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start)
1720         {
1721             if (cpu == env->best_cpu || !idle_cpu(cpu) || !cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
1722                 continue;
1723             }
1724 
1725             env->dst_cpu = cpu;
1726             rq = cpu_rq(env->dst_cpu);
1727             if (!xchg(&rq->numa_migrate_on, 1)) {
1728                 goto assign;
1729             }
1730         }
1731 
1732         /* Failed to find an alternative idle CPU */
1733         return;
1734     }
1735 
1736 assign:
1737     /*
1738      * Clear the previous best_cpu/rq numa-migrate flag, since the task
1739      * has now found a better CPU to move to or swap with.
1740      */
1741     if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) {
1742         rq = cpu_rq(env->best_cpu);
1743         WRITE_ONCE(rq->numa_migrate_on, 0);
1744     }
1745 
1746     if (env->best_task) {
1747         put_task_struct(env->best_task);
1748     }
1749     if (p) {
1750         get_task_struct(p);
1751     }
1752 
1753     env->best_task = p;
1754     env->best_imp = imp;
1755     env->best_cpu = env->dst_cpu;
1756 }
1757 
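/*
 * The ratio comparison in load_too_imbalanced() is done by cross-multiplying
 * so that everything stays in integer math: src_load/src_capacity vs
 * dst_load/dst_capacity becomes dst_load * src_capacity vs
 * src_load * dst_capacity. Illustrative numbers: src_load = 400 on
 * src_capacity = 1024 against dst_load = 300 on dst_capacity = 512 gives
 * imb = |300 * 1024 - 400 * 512| = 102400, which is then compared with the
 * pre-move imbalance computed the same way.
 */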
1758 static bool load_too_imbalanced(long src_load, long dst_load, struct task_numa_env *env)
1759 {
1760     long imb, old_imb;
1761     long orig_src_load, orig_dst_load;
1762     long src_capacity, dst_capacity;
1763 
1764     /*
1765      * The load is corrected for the CPU capacity available on each node.
1766      *
1767      * src_load        dst_load
1768      * ------------ vs ---------
1769      * src_capacity    dst_capacity
1770      */
1771     src_capacity = env->src_stats.compute_capacity;
1772     dst_capacity = env->dst_stats.compute_capacity;
1773 
1774     imb = abs(dst_load * src_capacity - src_load * dst_capacity);
1775 
1776     orig_src_load = env->src_stats.load;
1777     orig_dst_load = env->dst_stats.load;
1778 
1779     old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);
1780 
1781     /* Would this change make things worse? */
1782     return (imb > old_imb);
1783 }
1784 
1785 /*
1786  * Maximum NUMA importance can be 1998 (2*999);
1787  * SMALLIMP @ 30 would be close to 1998/64.
1788  * Used to deter task migration.
1789  */
1790 #define SMALLIMP 30
1791 
1792 /*
1793  * This checks if the overall compute and NUMA accesses of the system would
1794  * be improved if the source task were migrated to the target dst_cpu,
1795  * taking into account that it might be best to exchange the task
1796  * running on the dst_cpu with the source task.
1797  */
1798 static bool task_numa_compare(struct task_numa_env *env, long taskimp, long groupimp, bool maymove)
1799 {
1800     struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
1801     struct rq *dst_rq = cpu_rq(env->dst_cpu);
1802     long imp = p_ng ? groupimp : taskimp;
1803     struct task_struct *cur;
1804     long src_load, dst_load;
1805     int dist = env->dist;
1806     long moveimp = imp;
1807     long load;
1808     bool stopsearch = false;
1809 
1810     if (READ_ONCE(dst_rq->numa_migrate_on)) {
1811         return false;
1812     }
1813 
1814     rcu_read_lock();
1815     cur = rcu_dereference(dst_rq->curr);
1816     if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur))) {
1817         cur = NULL;
1818     }
1819 
1820     /*
1821      * Because we have preemption enabled we can get migrated around and
1822      * end up selecting ourselves (current == env->p) as a swap candidate.
1823      */
1824     if (cur == env->p) {
1825         stopsearch = true;
1826         goto unlock;
1827     }
1828 
1829     if (!cur) {
1830         if (maymove && moveimp >= env->best_imp) {
1831             goto assign;
1832         } else {
1833             goto unlock;
1834         }
1835     }
1836 
1837     /* Skip this swap candidate if it cannot move to the source cpu. */
1838     if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr)) {
1839         goto unlock;
1840     }
1841 
1842     /*
1843      * Skip this swap candidate if it is not moving to its preferred
1844      * node and the best task is.
1845      */
1846     if (env->best_task && env->best_task->numa_preferred_nid == env->src_nid &&
1847         cur->numa_preferred_nid != env->src_nid) {
1848         goto unlock;
1849     }
1850 
1851     /*
1852      * "imp" is the fault differential for the source task between the
1853      * source and destination node. Calculate the total differential for
1854      * the source task and potential destination task. The more negative
1855      * the value is, the more remote accesses would be expected to
1856      * be incurred if the tasks were swapped.
1857      *
1858      * If dst and source tasks are in the same NUMA group, or not
1859      * in any group, then look only at task weights.
1860      */
1861     cur_ng = rcu_dereference(cur->numa_group);
1862     if (cur_ng == p_ng) {
1863         imp = taskimp + task_weight(cur, env->src_nid, dist) - task_weight(cur, env->dst_nid, dist);
1864         /*
1865          * Add some hysteresis to prevent swapping the
1866          * tasks within a group over tiny differences.
1867          */
1868         if (cur_ng) {
1869             imp -= imp / 0x10;
1870         }
1871     } else {
1872         /*
1873          * Compare the group weights. If a task is all by itself
1874          * (not part of a group), use the task weight instead.
1875          */
1876         if (cur_ng && p_ng) {
1877             imp += group_weight(cur, env->src_nid, dist) - group_weight(cur, env->dst_nid, dist);
1878         } else {
1879             imp += task_weight(cur, env->src_nid, dist) - task_weight(cur, env->dst_nid, dist);
1880         }
1881     }
1882 
1883     /* Discourage picking a task already on its preferred node */
1884     if (cur->numa_preferred_nid == env->dst_nid) {
1885         imp -= imp / 0x10;
1886     }
1887 
1888     /*
1889      * Encourage picking a task that moves to its preferred node.
1890      * This potentially makes imp larger than its maximum of
1891      * 1998 (see SMALLIMP and task_weight for why) but in this
1892      * case, it does not matter.
1893      */
1894     if (cur->numa_preferred_nid == env->src_nid) {
1895         imp += imp / 0x8;
1896     }
1897 
1898     if (maymove && moveimp > imp && moveimp > env->best_imp) {
1899         imp = moveimp;
1900         cur = NULL;
1901         goto assign;
1902     }
1903 
1904     /*
1905      * Prefer swapping with a task moving to its preferred node over a
1906      * task that is not.
1907      */
1908     if (env->best_task && cur->numa_preferred_nid == env->src_nid &&
1909         env->best_task->numa_preferred_nid != env->src_nid) {
1910         goto assign;
1911     }
1912 
1913     /*
1914      * If the NUMA importance is less than SMALLIMP,
1915      * task migration might only result in ping pong
1916      * of tasks and also hurt performance due to cache
1917      * misses.
1918      */
1919     if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 0x2) {
1920         goto unlock;
1921     }
1922 
1923     /*
1924      * In the overloaded case, try and keep the load balanced.
1925      */
1926     load = task_h_load(env->p) - task_h_load(cur);
1927     if (!load) {
1928         goto assign;
1929     }
1930 
1931     dst_load = env->dst_stats.load + load;
1932     src_load = env->src_stats.load - load;
1933 
1934     if (load_too_imbalanced(src_load, dst_load, env)) {
1935         goto unlock;
1936     }
1937 
1938 assign:
1939     /* Evaluate an idle CPU for a task numa move. */
1940     if (!cur) {
1941         int cpu = env->dst_stats.idle_cpu;
1942 
1943         /* Nothing cached so current CPU went idle since the search. */
1944         if (cpu < 0) {
1945             cpu = env->dst_cpu;
1946         }
1947 
1948         /*
1949          * If the CPU is no longer truly idle and the previous best CPU
1950          * is, keep using it.
1951          */
1952         if (!idle_cpu(cpu) && env->best_cpu >= 0 && idle_cpu(env->best_cpu)) {
1953             cpu = env->best_cpu;
1954         }
1955 
1956         env->dst_cpu = cpu;
1957     }
1958 
1959     task_numa_assign(env, cur, imp);
1960 
1961     /*
1962      * If a move to idle is allowed because there is capacity or load
1963      * balance improves then stop the search. While a better swap
1964      * candidate may exist, a search is not free.
1965      */
1966     if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu)) {
1967         stopsearch = true;
1968     }
1969 
1970     /*
1971      * If a swap candidate must be identified and the current best task
1972      * moves its preferred node then stop the search.
1973      */
1974     if (!maymove && env->best_task && env->best_task->numa_preferred_nid == env->src_nid) {
1975         stopsearch = true;
1976     }
1977 unlock:
1978     rcu_read_unlock();
1979 
1980     return stopsearch;
1981 }
1982 
1983 static void task_numa_find_cpu(struct task_numa_env *env, long taskimp, long groupimp)
1984 {
1985     bool maymove = false;
1986     int cpu;
1987 
1988     /*
1989      * If dst node has spare capacity, then check if there is an
1990      * imbalance that would be overruled by the load balancer.
1991      */
1992     if (env->dst_stats.node_type == node_has_spare) {
1993         unsigned int imbalance;
1994         int src_running, dst_running;
1995 
1996         /*
1997          * Would movement cause an imbalance? Note that if src has
1998          * more running tasks than dst, the imbalance is ignored, as the
1999          * move improves the imbalance from the perspective of the
2000          * CPU load balancer.
2001          */
2002         src_running = env->src_stats.nr_running - 1;
2003         dst_running = env->dst_stats.nr_running + 1;
2004         imbalance = max(0, dst_running - src_running);
2005         imbalance = adjust_numa_imbalance(imbalance, dst_running);
2006         /* Use idle CPU if there is no imbalance */
2007         if (!imbalance) {
2008             maymove = true;
2009             if (env->dst_stats.idle_cpu >= 0) {
2010                 env->dst_cpu = env->dst_stats.idle_cpu;
2011                 task_numa_assign(env, NULL, 0);
2012                 return;
2013             }
2014         }
2015     } else {
2016         long src_load, dst_load, load;
2017         /*
2018          * If the improvement from just moving env->p is better
2019          * than swapping tasks around, check if a move is possible.
2020          */
2021         load = task_h_load(env->p);
2022         dst_load = env->dst_stats.load + load;
2023         src_load = env->src_stats.load - load;
2024         maymove = !load_too_imbalanced(src_load, dst_load, env);
2025     }
2026 
2027     for_each_cpu(cpu, cpumask_of_node(env->dst_nid))
2028     {
2029         /* Skip this CPU if the source task cannot migrate */
2030         if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
2031             continue;
2032         }
2033 
2034         env->dst_cpu = cpu;
2035         if (task_numa_compare(env, taskimp, groupimp, maymove)) {
2036             break;
2037         }
2038     }
2039 }
2040 
2041 static int task_numa_migrate(struct task_struct *p)
2042 {
2043     struct task_numa_env env = {
2044         .p = p,
2045 
2046         .src_cpu = task_cpu(p),
2047         .src_nid = task_node(p),
2048 
2049         .imbalance_pct = 112,
2050 
2051         .best_task = NULL,
2052         .best_imp = 0,
2053         .best_cpu = -1,
2054     };
2055     unsigned long taskweight, groupweight;
2056     struct sched_domain *sd;
2057     long taskimp, groupimp;
2058     struct numa_group *ng;
2059     struct rq *best_rq;
2060     int nid, ret, dist;
2061 
2062     /*
2063      * Pick the lowest SD_NUMA domain, as that would have the smallest
2064      * imbalance and would be the first to start moving tasks about.
2065      *
2066      * And we want to avoid any moving of tasks about, as that would create
2067      * random movement of tasks -- countering the numa conditions we're trying
2068      * to satisfy here.
2069      */
2070     rcu_read_lock();
2071     sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
2072     if (sd) {
2073         env.imbalance_pct = FAIR_ONEHUNDRED + (sd->imbalance_pct - FAIR_ONEHUNDRED) / 0x2;
2074     }
2075     rcu_read_unlock();
2076 
2077     /*
2078      * Cpusets can break the scheduler domain tree into smaller
2079      * balance domains, some of which do not cross NUMA boundaries.
2080      * Tasks that are "trapped" in such domains cannot be migrated
2081      * elsewhere, so there is no point in (re)trying.
2082      */
2083     if (unlikely(!sd)) {
2084         sched_setnuma(p, task_node(p));
2085         return -EINVAL;
2086     }
2087 
2088     env.dst_nid = p->numa_preferred_nid;
2089     dist = env.dist = node_distance(env.src_nid, env.dst_nid);
2090     taskweight = task_weight(p, env.src_nid, dist);
2091     groupweight = group_weight(p, env.src_nid, dist);
2092     update_numa_stats(&env, &env.src_stats, env.src_nid, false);
2093     taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
2094     groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
2095     update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
2096 
2097     /* Try to find a spot on the preferred nid. */
2098     task_numa_find_cpu(&env, taskimp, groupimp);
2099 
2100     /*
2101      * Look at other nodes in these cases:
2102      * - there is no space available on the preferred_nid
2103      * - the task is part of a numa_group that is interleaved across
2104      *   multiple NUMA nodes; in order to better consolidate the group,
2105      *   we need to check other locations.
2106      */
2107     ng = deref_curr_numa_group(p);
2108     if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
2109         for_each_online_node(nid)
2110         {
2111             if (nid == env.src_nid || nid == p->numa_preferred_nid) {
2112                 continue;
2113             }
2114 
2115             dist = node_distance(env.src_nid, env.dst_nid);
2116             if (sched_numa_topology_type == NUMA_BACKPLANE && dist != env.dist) {
2117                 taskweight = task_weight(p, env.src_nid, dist);
2118                 groupweight = group_weight(p, env.src_nid, dist);
2119             }
2120 
2121             /* Only consider nodes where both task and groups benefit */
2122             taskimp = task_weight(p, nid, dist) - taskweight;
2123             groupimp = group_weight(p, nid, dist) - groupweight;
2124             if (taskimp < 0 && groupimp < 0) {
2125                 continue;
2126             }
2127 
2128             env.dist = dist;
2129             env.dst_nid = nid;
2130             update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
2131             task_numa_find_cpu(&env, taskimp, groupimp);
2132         }
2133     }
2134 
2135     /*
2136      * If the task is part of a workload that spans multiple NUMA nodes,
2137      * and is migrating into one of the workload's active nodes, remember
2138      * this node as the task's preferred numa node, so the workload can
2139      * settle down.
2140      * A task that migrated to a second choice node will be better off
2141      * trying for a better one later. Do not set the preferred node here.
2142      */
2143     if (ng) {
2144         if (env.best_cpu == -1) {
2145             nid = env.src_nid;
2146         } else {
2147             nid = cpu_to_node(env.best_cpu);
2148         }
2149 
2150         if (nid != p->numa_preferred_nid) {
2151             sched_setnuma(p, nid);
2152         }
2153     }
2154 
2155     /* No better CPU than the current one was found. */
2156     if (env.best_cpu == -1) {
2157         trace_sched_stick_numa(p, env.src_cpu, NULL, -1);
2158         return -EAGAIN;
2159     }
2160 
2161     best_rq = cpu_rq(env.best_cpu);
2162     if (env.best_task == NULL) {
2163         ret = migrate_task_to(p, env.best_cpu);
2164         WRITE_ONCE(best_rq->numa_migrate_on, 0);
2165         if (ret != 0) {
2166             trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu);
2167         }
2168         return ret;
2169     }
2170 
2171     ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
2172     WRITE_ONCE(best_rq->numa_migrate_on, 0);
2173 
2174     if (ret != 0) {
2175         trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu);
2176     }
2177     put_task_struct(env.best_task);
2178     return ret;
2179 }
2180 
2181 /* Attempt to migrate a task to a CPU on the preferred node. */
2182 static void numa_migrate_preferred(struct task_struct *p)
2183 {
2184     unsigned long interval = HZ;
2185 
2186     /* This task has no NUMA fault statistics yet */
2187     if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults)) {
2188         return;
2189     }
2190 
2191     /* Periodically retry migrating the task to the preferred node */
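    /*
     * Illustrative numbers: with a 1000ms scan period and HZ=250, this is
     * min(250, msecs_to_jiffies(1000) / 16) = 15 jiffies, i.e. the retry
     * interval is capped at roughly 60ms.
     */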
2192     interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 0x10);
2193     p->numa_migrate_retry = jiffies + interval;
2194 
2195     /* Success if task is already running on preferred CPU */
2196     if (task_node(p) == p->numa_preferred_nid) {
2197         return;
2198     }
2199 
2200     /* Otherwise, try migrate to a CPU on the preferred node */
2201     task_numa_migrate(p);
2202 }
2203 
2204 /*
2205  * Find out how many nodes the workload is actively running on. Do this by
2206  * tracking the nodes from which NUMA hinting faults are triggered. This can
2207  * be different from the set of nodes where the workload's memory is currently
2208  * located.
2209  */
2210 static void numa_group_count_active_nodes(struct numa_group *numa_group)
2211 {
2212     unsigned long faults, max_faults = 0;
2213     int nid, active_nodes = 0;
2214 
2215     for_each_online_node(nid)
2216     {
2217         faults = group_faults_cpu(numa_group, nid);
2218         if (faults > max_faults) {
2219             max_faults = faults;
2220         }
2221     }
2222 
2223     for_each_online_node(nid)
2224     {
2225         faults = group_faults_cpu(numa_group, nid);
2226         if (faults * ACTIVE_NODE_FRACTION > max_faults) {
2227             active_nodes++;
2228         }
2229     }
2230 
2231     numa_group->max_faults_cpu = max_faults;
2232     numa_group->active_nodes = active_nodes;
2233 }
2234 
2235 /*
2236  * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
2237  * increments. The more local the fault statistics are, the higher the scan
2238  * period will be for the next scan window. If the local/(local+remote) ratio
2239  * is below NUMA_PERIOD_THRESHOLD (the ratio ranges over 1..NUMA_PERIOD_SLOTS),
2240  * the scan period will decrease. Aim for 70% local accesses.
2241  */
2242 #define NUMA_PERIOD_SLOTS 10
2243 #define NUMA_PERIOD_THRESHOLD 7
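/*
 * Worked example (illustrative numbers): with 70 local and 30 remote faults,
 * lr_ratio = (70 * NUMA_PERIOD_SLOTS) / 100 = 7 == NUMA_PERIOD_THRESHOLD,
 * so (assuming ps_ratio stays below the threshold) the period grows by one
 * period_slot (scan slower). With 30 local and 70 remote, and a
 * private/shared ratio at most as large, the period shrinks by
 * (7 - 3) period_slots (scan faster).
 */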
2244 
2245 /*
2246  * Increase the scan period (slow down scanning) if the majority of
2247  * our memory is already on our local node, or if the majority of
2248  * the page accesses are shared with other processes.
2249  * Otherwise, decrease the scan period.
2250  */
2251 static void update_task_scan_period(struct task_struct *p, unsigned long shared, unsigned long private)
2252 {
2253     unsigned int period_slot;
2254     int lr_ratio, ps_ratio;
2255     int diff;
2256 
2257     unsigned long remote = p->numa_faults_locality[0];
2258     unsigned long local = p->numa_faults_locality[1];
2259 
2260     /*
2261      * If there were no recorded hinting faults then either the task is
2262      * completely idle or all activity is in areas that are not of interest
2263      * to automatic numa balancing. Related to that, if there were failed
2264      * migrations then it implies we are migrating too quickly or the local
2265      * node is overloaded. In either case, scan slower.
2266      */
2267     if (local + shared == 0 || p->numa_faults_locality[0x2]) {
2268         p->numa_scan_period = min(p->numa_scan_period_max, p->numa_scan_period << 1);
2269 
2270         p->mm->numa_next_scan = jiffies + msecs_to_jiffies(p->numa_scan_period);
2271 
2272         return;
2273     }
2274 
2275     /*
2276      * Prepare to scale scan period relative to the current period.
2277      *     == NUMA_PERIOD_THRESHOLD scan period stays the same
2278      *      < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
2279      *     >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
2280      */
2281     period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
2282     lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
2283     ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared);
2284 
2285     if (ps_ratio >= NUMA_PERIOD_THRESHOLD) {
2286         /*
2287          * Most memory accesses are local. There is no need to
2288          * do fast NUMA scanning, since memory is already local.
2289          */
2290         int slot = ps_ratio - NUMA_PERIOD_THRESHOLD;
2291         if (!slot) {
2292             slot = 1;
2293         }
2294         diff = slot * period_slot;
2295     } else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) {
2296         /*
2297          * Most memory accesses are shared with other tasks.
2298          * There is no point in continuing fast NUMA scanning,
2299          * since other tasks may just move the memory elsewhere.
2300          */
2301         int slot = lr_ratio - NUMA_PERIOD_THRESHOLD;
2302         if (!slot) {
2303             slot = 1;
2304         }
2305         diff = slot * period_slot;
2306     } else {
2307         /*
2308          * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS,
2309          * yet they are not on the local NUMA node. Speed up
2310          * NUMA scanning to get the memory moved over.
2311          */
2312         int ratio = max(lr_ratio, ps_ratio);
2313         diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
2314     }
2315 
2316     p->numa_scan_period = clamp(p->numa_scan_period + diff, task_scan_min(p), task_scan_max(p));
2317     memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2318 }
2319 
2320 /*
2321  * Get the fraction of time the task has been running since the last
2322  * NUMA placement cycle. The scheduler keeps similar statistics, but
2323  * decays those on a 32ms period, which is orders of magnitude off
2324  * from the dozens-of-seconds NUMA balancing period. Use the scheduler
2325  * stats only if the task is so new there are no NUMA statistics yet.
2326  */
2327 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
2328 {
2329     u64 runtime, delta, now;
2330     /* Use the start of this time slice to avoid calculations. */
2331     now = p->se.exec_start;
2332     runtime = p->se.sum_exec_runtime;
2333 
2334     if (p->last_task_numa_placement) {
2335         delta = runtime - p->last_sum_exec_runtime;
2336         *period = now - p->last_task_numa_placement;
2337 
2338         /* Avoid time going backwards, prevent potential divide error: */
2339         if (unlikely((s64)*period < 0)) {
2340             *period = 0;
2341         }
2342     } else {
2343         delta = p->se.avg.load_sum;
2344         *period = LOAD_AVG_MAX;
2345     }
2346 
2347     p->last_sum_exec_runtime = runtime;
2348     p->last_task_numa_placement = now;
2349 
2350     return delta;
2351 }
2352 
2353 /*
2354  * Determine the preferred nid for a task in a numa_group. This needs to
2355  * be done in a way that produces consistent results with group_weight,
2356  * otherwise workloads might not converge.
2357  */
2358 static int preferred_group_nid(struct task_struct *p, int nid)
2359 {
2360     nodemask_t nodes;
2361     int dist;
2362 
2363     /* Direct connections between all NUMA nodes. */
2364     if (sched_numa_topology_type == NUMA_DIRECT) {
2365         return nid;
2366     }
2367 
2368     /*
2369      * On a system with glueless mesh NUMA topology, group_weight
2370      * scores nodes according to the number of NUMA hinting faults on
2371      * both the node itself, and on nearby nodes.
2372      */
2373     if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
2374         unsigned long score, max_score = 0;
2375         int node, max_node = nid;
2376 
2377         dist = sched_max_numa_distance;
2378 
2379         for_each_online_node(node)
2380         {
2381             score = group_weight(p, node, dist);
2382             if (score > max_score) {
2383                 max_score = score;
2384                 max_node = node;
2385             }
2386         }
2387         return max_node;
2388     }
2389 
2390     /*
2391      * Finding the preferred nid in a system with NUMA backplane
2392      * interconnect topology is more involved. The goal is to locate
2393      * tasks from numa_groups near each other in the system, and
2394      * untangle workloads from different sides of the system. This requires
2395      * searching down the hierarchy of node groups, recursively searching
2396      * inside the highest scoring group of nodes. The nodemask tricks
2397      * keep the complexity of the search down.
2398      */
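    /*
     * Sketch of one iteration (hypothetical 4-node backplane): at the
     * largest distance the online nodes might partition into groups {0,1}
     * and {2,3}; whichever group accumulates more of the numa_group's
     * faults, say {2,3}, becomes the new search mask, and the next smaller
     * distance is evaluated only within it until a single node remains.
     */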
2399     nodes = node_online_map;
2400     for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
2401         unsigned long max_faults = 0;
2402         nodemask_t max_group = NODE_MASK_NONE;
2403         int a, b;
2404 
2405         /* Are there nodes at this distance from each other? */
2406         if (!find_numa_distance(dist)) {
2407             continue;
2408         }
2409 
2410         for_each_node_mask(a, nodes)
2411         {
2412             unsigned long faults = 0;
2413             nodemask_t this_group;
2414             nodes_clear(this_group);
2415 
2416             /* Sum group's NUMA faults; includes a==b case. */
2417             for_each_node_mask(b, nodes)
2418             {
2419                 if (node_distance(a, b) < dist) {
2420                     faults += group_faults(p, b);
2421                     node_set(b, this_group);
2422                     node_clear(b, nodes);
2423                 }
2424             }
2425 
2426             /* Remember the top group. */
2427             if (faults > max_faults) {
2428                 max_faults = faults;
2429                 max_group = this_group;
2430                 /*
2431                  * subtle: at the smallest distance there is
2432                  * just one node left in each "group", the
2433                  * winner is the preferred nid.
2434                  */
2435                 nid = a;
2436             }
2437         }
2438         /* Next round, evaluate the nodes within max_group. */
2439         if (!max_faults) {
2440             break;
2441         }
2442         nodes = max_group;
2443     }
2444     return nid;
2445 }
2446 
2447 static void task_numa_placement(struct task_struct *p)
2448 {
2449     int seq, nid, max_nid = NUMA_NO_NODE;
2450     unsigned long max_faults = 0;
2451     unsigned long fault_types[2] = {0, 0};
2452     unsigned long total_faults;
2453     u64 runtime, period;
2454     spinlock_t *group_lock = NULL;
2455     struct numa_group *ng;
2456 
2457     /*
2458      * The p->mm->numa_scan_seq field gets updated without
2459      * exclusive access. Use READ_ONCE() here to ensure
2460      * that the field is read in a single access:
2461      */
2462     seq = READ_ONCE(p->mm->numa_scan_seq);
2463     if (p->numa_scan_seq == seq) {
2464         return;
2465     }
2466     p->numa_scan_seq = seq;
2467     p->numa_scan_period_max = task_scan_max(p);
2468 
2469     total_faults = p->numa_faults_locality[0] + p->numa_faults_locality[1];
2470     runtime = numa_get_avg_runtime(p, &period);
2471 
2472     /* If the task is part of a group prevent parallel updates to group stats */
2473     ng = deref_curr_numa_group(p);
2474     if (ng) {
2475         group_lock = &ng->lock;
2476         spin_lock_irq(group_lock);
2477     }
2478 
2479     /* Find the node with the highest number of faults */
2480     for_each_online_node(nid)
2481     {
2482         /* Keep track of the offsets in numa_faults array */
2483         int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
2484         unsigned long faults = 0, group_faults = 0;
2485         int priv;
2486 
2487         for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
2488             long diff, f_diff, f_weight;
2489 
2490             mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
2491             membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
2492             cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
2493             cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
2494 
2495             /* Decay existing window, copy faults since last scan */
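            /*
             * Together with the "+= diff" below this forms a half-life
             * running average: numa_faults[mem_idx] becomes
             * old / 2 + membuf, so each past scan window contributes
             * with weight (1/2)^age.
             */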
2496             diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 0x2;
2497             fault_types[priv] += p->numa_faults[membuf_idx];
2498             p->numa_faults[membuf_idx] = 0;
2499 
2500             /*
2501              * Normalize the faults_from, so all tasks in a group
2502              * count according to CPU use, instead of by the raw
2503              * number of faults. Tasks with little runtime have
2504              * little over-all impact on throughput, and thus their
2505              * faults are less important.
2506              */
2507             f_weight = div64_u64(runtime << 0x10, period + 1);
2508             f_weight = (f_weight * p->numa_faults[cpubuf_idx]) / (total_faults + 1);
2509             f_diff = f_weight - p->numa_faults[cpu_idx] / 0x2;
2510             p->numa_faults[cpubuf_idx] = 0;
2511 
2512             p->numa_faults[mem_idx] += diff;
2513             p->numa_faults[cpu_idx] += f_diff;
2514             faults += p->numa_faults[mem_idx];
2515             p->total_numa_faults += diff;
2516             if (ng) {
2517                 /*
2518                  * safe because we can only change our own group
2519                  *
2520                  * mem_idx represents the offset for a given
2521                  * nid and priv in a specific region because it
2522                  * is at the beginning of the numa_faults array.
2523                  */
2524                 ng->faults[mem_idx] += diff;
2525                 ng->faults_cpu[mem_idx] += f_diff;
2526                 ng->total_faults += diff;
2527                 group_faults += ng->faults[mem_idx];
2528             }
2529         }
2530 
2531         if (!ng) {
2532             if (faults > max_faults) {
2533                 max_faults = faults;
2534                 max_nid = nid;
2535             }
2536         } else if (group_faults > max_faults) {
2537             max_faults = group_faults;
2538             max_nid = nid;
2539         }
2540     }
2541 
2542     if (ng) {
2543         numa_group_count_active_nodes(ng);
2544         spin_unlock_irq(group_lock);
2545         max_nid = preferred_group_nid(p, max_nid);
2546     }
2547 
2548     if (max_faults) {
2549         /* Set the new preferred node */
2550         if (max_nid != p->numa_preferred_nid) {
2551             sched_setnuma(p, max_nid);
2552         }
2553     }
2554 
2555     update_task_scan_period(p, fault_types[0], fault_types[1]);
2556 }
2557 
2558 static inline int get_numa_group(struct numa_group *grp)
2559 {
2560     return refcount_inc_not_zero(&grp->refcount);
2561 }
2562 
2563 static inline void put_numa_group(struct numa_group *grp)
2564 {
2565     if (refcount_dec_and_test(&grp->refcount)) {
2566         kfree_rcu(grp, rcu);
2567     }
2568 }
2569 
2570 static void task_numa_group(struct task_struct *p, int cpupid, int flags, int *priv)
2571 {
2572     struct numa_group *grp, *my_grp;
2573     struct task_struct *tsk;
2574     bool join = false;
2575     int cpu = cpupid_to_cpu(cpupid);
2576     int i;
2577 
2578     if (unlikely(!deref_curr_numa_group(p))) {
2579         unsigned int size = sizeof(struct numa_group) + 0x4 * nr_node_ids * sizeof(unsigned long);
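        /*
         * 4 * nr_node_ids covers the two trailing arrays, faults[] and
         * faults_cpu[], each holding NR_NUMA_HINT_FAULT_TYPES * nr_node_ids
         * counters; faults_cpu is pointed at the second half below.
         */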
2580 
2581         grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2582         if (!grp) {
2583             return;
2584         }
2585 
2586         refcount_set(&grp->refcount, 1);
2587         grp->active_nodes = 1;
2588         grp->max_faults_cpu = 0;
2589         spin_lock_init(&grp->lock);
2590         grp->gid = p->pid;
2591         /* Second half of the array tracks nids where faults happen */
2592         grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES * nr_node_ids;
2593 
2594         for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
2595             grp->faults[i] = p->numa_faults[i];
2596         }
2597 
2598         grp->total_faults = p->total_numa_faults;
2599 
2600         grp->nr_tasks++;
2601         rcu_assign_pointer(p->numa_group, grp);
2602     }
2603 
2604     rcu_read_lock();
2605     tsk = READ_ONCE(cpu_rq(cpu)->curr);
2606     if (!cpupid_match_pid(tsk, cpupid)) {
2607         goto no_join;
2608     }
2609 
2610     grp = rcu_dereference(tsk->numa_group);
2611     if (!grp) {
2612         goto no_join;
2613     }
2614 
2615     my_grp = deref_curr_numa_group(p);
2616     if (grp == my_grp) {
2617         goto no_join;
2618     }
2619 
2620     /*
2621      * Only join the other group if it's bigger; if we're the bigger group,
2622      * the other task will join us.
2623      */
2624     if (my_grp->nr_tasks > grp->nr_tasks) {
2625         goto no_join;
2626     }
2627 
2628     /*
2629      * Tie-break on the grp address.
2630      */
2631     if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp) {
2632         goto no_join;
2633     }
2634 
2635     /* Always join threads in the same process. */
2636     if (tsk->mm == current->mm) {
2637         join = true;
2638     }
2639 
2640     /* Simple filter to avoid false positives due to PID collisions */
2641     if (flags & TNF_SHARED) {
2642         join = true;
2643     }
2644 
2645     /* Update priv based on whether false sharing was detected */
2646     *priv = !join;
2647 
2648     if (join && !get_numa_group(grp)) {
2649         goto no_join;
2650     }
2651 
2652     rcu_read_unlock();
2653 
2654     if (!join) {
2655         return;
2656     }
2657 
2658     BUG_ON(irqs_disabled());
2659     double_lock_irq(&my_grp->lock, &grp->lock);
2660 
2661     for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
2662         my_grp->faults[i] -= p->numa_faults[i];
2663         grp->faults[i] += p->numa_faults[i];
2664     }
2665     my_grp->total_faults -= p->total_numa_faults;
2666     grp->total_faults += p->total_numa_faults;
2667 
2668     my_grp->nr_tasks--;
2669     grp->nr_tasks++;
2670 
2671     spin_unlock(&my_grp->lock);
2672     spin_unlock_irq(&grp->lock);
2673 
2674     rcu_assign_pointer(p->numa_group, grp);
2675 
2676     put_numa_group(my_grp);
2677     return;
2678 
2679 no_join:
2680     rcu_read_unlock();
2681     return;
2682 }
2683 
2684 /*
2685  * Get rid of NUMA statistics associated with a task (either current or dead).
2686  * If @final is set, the task is dead and has reached refcount zero, so we can
2687  * safely free all relevant data structures. Otherwise, there might be
2688  * concurrent reads from places like load balancing and procfs, and we should
2689  * reset the data back to default state without freeing ->numa_faults.
2690  */
2691 void task_numa_free(struct task_struct *p, bool final)
2692 {
2693     /* safe: p either is current or is being freed by current */
2694     struct numa_group *grp = rcu_dereference_raw(p->numa_group);
2695     unsigned long *numa_faults = p->numa_faults;
2696     unsigned long flags;
2697     int i;
2698 
2699     if (!numa_faults) {
2700         return;
2701     }
2702 
2703     if (grp) {
2704         spin_lock_irqsave(&grp->lock, flags);
2705         for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
2706             grp->faults[i] -= p->numa_faults[i];
2707         }
2708         grp->total_faults -= p->total_numa_faults;
2709 
2710         grp->nr_tasks--;
2711         spin_unlock_irqrestore(&grp->lock, flags);
2712         RCU_INIT_POINTER(p->numa_group, NULL);
2713         put_numa_group(grp);
2714     }
2715 
2716     if (final) {
2717         p->numa_faults = NULL;
2718         kfree(numa_faults);
2719     } else {
2720         p->total_numa_faults = 0;
2721         for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
2722             numa_faults[i] = 0;
2723         }
2724     }
2725 }
2726 
2727 /*
2728  * Got a PROT_NONE fault for a page on @node.
2729  */
2730 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2731 {
2732     struct task_struct *p = current;
2733     bool migrated = flags & TNF_MIGRATED;
2734     int cpu_node = task_node(current);
2735     int local = !!(flags & TNF_FAULT_LOCAL);
2736     struct numa_group *ng;
2737     int priv;
2738 
2739     if (!static_branch_likely(&sched_numa_balancing)) {
2740         return;
2741     }
2742 
2743     /* for example, ksmd faulting in a user's mm */
2744     if (!p->mm) {
2745         return;
2746     }
2747 
2748     /* Allocate buffer to track faults on a per-node basis */
2749     if (unlikely(!p->numa_faults)) {
2750         int size = sizeof(*p->numa_faults) * NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
2751 
2752         p->numa_faults = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2753         if (!p->numa_faults) {
2754             return;
2755         }
2756 
2757         p->total_numa_faults = 0;
2758         memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2759     }
2760 
2761     /*
2762      * First accesses are treated as private, otherwise consider accesses
2763      * to be private if the accessing pid has not changed
2764      */
2765     if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2766         priv = 1;
2767     } else {
2768         priv = cpupid_match_pid(p, last_cpupid);
2769         if (!priv && !(flags & TNF_NO_GROUP)) {
2770             task_numa_group(p, last_cpupid, flags, &priv);
2771         }
2772     }
2773 
2774     /*
2775      * If a workload spans multiple NUMA nodes, a shared fault that
2776      * occurs wholly within the set of nodes that the workload is
2777      * actively using should be counted as local. This allows the
2778      * scan rate to slow down when a workload has settled down.
2779      */
2780     ng = deref_curr_numa_group(p);
2781     if (!priv && !local && ng && ng->active_nodes > 1 && numa_is_active_node(cpu_node, ng) &&
2782         numa_is_active_node(mem_node, ng)) {
2783         local = 1;
2784     }
2785 
2786     /*
2787      * Retry to migrate task to preferred node periodically, in case it
2788      * previously failed, or the scheduler moved us.
2789      */
2790     if (time_after(jiffies, p->numa_migrate_retry)) {
2791         task_numa_placement(p);
2792         numa_migrate_preferred(p);
2793     }
2794 
2795     if (migrated) {
2796         p->numa_pages_migrated += pages;
2797     }
2798     if (flags & TNF_MIGRATE_FAIL) {
2799         p->numa_faults_locality[0x2] += pages;
2800     }
2801 
2802     p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2803     p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
2804     p->numa_faults_locality[local] += pages;
2805 }
2806 
2807 static void reset_ptenuma_scan(struct task_struct *p)
2808 {
2809     /*
2810      * We only did a read acquisition of the mmap sem, so
2811      * p->mm->numa_scan_seq is written to without exclusive access
2812      * and the update is not guaranteed to be atomic. That's not
2813      * much of an issue though, since this is just used for
2814      * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
2815      * expensive, to avoid any form of compiler optimizations:
2816      */
2817     WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
2818     p->mm->numa_scan_offset = 0;
2819 }
2820 
2821 /*
2822  * The expensive part of numa migration is done from task_work context.
2823  * Triggered from task_tick_numa().
2824  */
2825 static void task_numa_work(struct callback_head *work)
2826 {
2827     unsigned long migrate, next_scan, now = jiffies;
2828     struct task_struct *p = current;
2829     struct mm_struct *mm = p->mm;
2830     u64 runtime = p->se.sum_exec_runtime;
2831     struct vm_area_struct *vma;
2832     unsigned long start, end;
2833     unsigned long nr_pte_updates = 0;
2834     long pages, virtpages;
2835 
2836     SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
2837 
2838     work->next = work;
2839     /*
2840      * Who cares about NUMA placement when they're dying.
2841      *
2842      * NOTE: make sure not to dereference p->mm before this check,
2843      * exit_task_work() happens _after_ exit_mm() so we could be called
2844      * without p->mm even though we still had it when we enqueued this
2845      * work.
2846      */
2847     if (p->flags & PF_EXITING) {
2848         return;
2849     }
2850 
2851     if (!mm->numa_next_scan) {
2852         mm->numa_next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2853     }
2854 
2855     /*
2856      * Enforce maximal scan/migration frequency..
2857      */
2858     migrate = mm->numa_next_scan;
2859     if (time_before(now, migrate)) {
2860         return;
2861     }
2862 
2863     if (p->numa_scan_period == 0) {
2864         p->numa_scan_period_max = task_scan_max(p);
2865         p->numa_scan_period = task_scan_start(p);
2866     }
2867 
2868     next_scan = now + msecs_to_jiffies(p->numa_scan_period);
2869     if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate) {
2870         return;
2871     }
2872 
2873     /*
2874      * Delay this task enough that another task of this mm will likely win
2875      * the next time around.
2876      */
2877     p->node_stamp += 0x2 * TICK_NSEC;
2878 
2879     start = mm->numa_scan_offset;
2880     pages = sysctl_numa_balancing_scan_size;
2881     pages <<= FAIR_TWENTY - PAGE_SHIFT; /* MB in pages */
2882     virtpages = pages * 0x8;            /* Scan up to this much virtual space */
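    /*
     * E.g. with the (assumed) default 256MB scan size and 4KiB pages this
     * gives pages = 256 << (20 - 12) = 65536 and virtpages = 524288,
     * bounding how much address space one invocation may walk.
     */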
2883     if (!pages) {
2884         return;
2885     }
2886 
2887     if (!mmap_read_trylock(mm)) {
2888         return;
2889     }
2890     vma = find_vma(mm, start);
2891     if (!vma) {
2892         reset_ptenuma_scan(p);
2893         start = 0;
2894         vma = mm->mmap;
2895     }
2896     for (; vma; vma = vma->vm_next) {
2897         if (!vma_migratable(vma) || !vma_policy_mof(vma) || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
2898             continue;
2899         }
2900 
2901         /*
2902          * Shared library pages mapped by multiple processes are not
2903          * migrated as it is expected they are cache replicated. Avoid
2904          * hinting faults in read-only file-backed mappings or the vdso
2905          * as migrating the pages will be of marginal benefit.
2906          */
2907         if (!vma->vm_mm || (vma->vm_file && (vma->vm_flags & (VM_READ | VM_WRITE)) == (VM_READ))) {
2908             continue;
2909         }
2910 
2911         /*
2912          * Skip inaccessible VMAs to avoid any confusion between
2913          * PROT_NONE and NUMA hinting ptes
2914          */
2915         if (!vma_is_accessible(vma)) {
2916             continue;
2917         }
2918 
2919         do {
2920             start = max(start, vma->vm_start);
2921             end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2922             end = min(end, vma->vm_end);
2923             nr_pte_updates = change_prot_numa(vma, start, end);
2924             /*
2925              * Try to scan sysctl_numa_balancing_scan_size worth of
2926              * hpages that have at least one present PTE that
2927              * is not already pte-numa. If the VMA contains
2928              * areas that are unused or already full of prot_numa
2929              * PTEs, scan up to virtpages, to skip through those
2930              * areas faster.
2931              */
2932             if (nr_pte_updates) {
2933                 pages -= (end - start) >> PAGE_SHIFT;
2934             }
2935             virtpages -= (end - start) >> PAGE_SHIFT;
2936 
2937             start = end;
2938             if (pages <= 0 || virtpages <= 0) {
2939                 goto out;
2940             }
2941 
2942             cond_resched();
2943         } while (end != vma->vm_end);
2944     }
2945 
2946 out:
2947     /*
2948      * It is possible to reach the end of the VMA list but the last few
2949      * VMAs are not guaranteed to be vma_migratable. If they are not, we
2950      * would find the !migratable VMA on the next scan but not reset the
2951      * scanner to the start so check it now.
2952      */
2953     if (vma) {
2954         mm->numa_scan_offset = start;
2955     } else {
2956         reset_ptenuma_scan(p);
2957     }
2958     mmap_read_unlock(mm);
2959 
2960     /*
2961      * Make sure tasks use at least 32x as much time to run other code
2962      * than they used here, to limit NUMA PTE scanning overhead to 3% max.
2963      * Usually update_task_scan_period slows down scanning enough; on an
2964      * overloaded system we need to limit overhead on a per task basis.
2965      */
2966     if (unlikely(p->se.sum_exec_runtime != runtime)) {
2967         u64 diff = p->se.sum_exec_runtime - runtime;
2968         p->node_stamp += 0x20 * diff;
2969     }
2970 }
2971 
2972 void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
2973 {
2974     int mm_users = 0;
2975     struct mm_struct *mm = p->mm;
2976 
2977     if (mm) {
2978         mm_users = atomic_read(&mm->mm_users);
2979         if (mm_users == 1) {
2980             mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2981             mm->numa_scan_seq = 0;
2982         }
2983     }
2984     p->node_stamp = 0;
2985     p->numa_scan_seq = mm ? mm->numa_scan_seq : 0;
2986     p->numa_scan_period = sysctl_numa_balancing_scan_delay;
2987     /* Protect against double add, see task_tick_numa and task_numa_work */
2988     p->numa_work.next = &p->numa_work;
2989     p->numa_faults = NULL;
2990     RCU_INIT_POINTER(p->numa_group, NULL);
2991     p->last_task_numa_placement = 0;
2992     p->last_sum_exec_runtime = 0;
2993 
2994     init_task_work(&p->numa_work, task_numa_work);
2995 
2996     /* New address space, reset the preferred nid */
2997     if (!(clone_flags & CLONE_VM)) {
2998         p->numa_preferred_nid = NUMA_NO_NODE;
2999         return;
3000     }
3001 
3002     /*
3003      * New thread, keep existing numa_preferred_nid which should be copied
3004      * already by arch_dup_task_struct but stagger when scans start.
3005      */
3006     if (mm) {
3007         unsigned int delay;
3008 
3009         delay = min_t(unsigned int, task_scan_max(current), current->numa_scan_period * mm_users * NSEC_PER_MSEC);
3010         delay += 0x2 * TICK_NSEC;
3011         p->node_stamp = delay;
3012     }
3013 }
3014 
3015 /*
3016  * Drive the periodic memory faults..
3017  */
3018 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
3019 {
3020     struct callback_head *work = &curr->numa_work;
3021     u64 period, now;
3022 
3023     /*
3024      * We don't care about NUMA placement if we don't have memory.
3025      */
3026     if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) {
3027         return;
3028     }
3029 
3030     /*
3031      * Using runtime rather than walltime has the dual advantage that
3032      * we (mostly) drive the selection from busy threads and that the
3033      * task needs to have done some actual work before we bother with
3034      * NUMA placement.
3035      */
3036     now = curr->se.sum_exec_runtime;
3037     period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
3038 
3039     if (now > curr->node_stamp + period) {
3040         if (!curr->node_stamp) {
3041             curr->numa_scan_period = task_scan_start(curr);
3042         }
3043         curr->node_stamp += period;
3044 
3045         if (!time_before(jiffies, curr->mm->numa_next_scan)) {
3046             task_work_add(curr, work, TWA_RESUME);
3047         }
3048     }
3049 }
3050 
3051 static void update_scan_period(struct task_struct *p, int new_cpu)
3052 {
3053     int src_nid = cpu_to_node(task_cpu(p));
3054     int dst_nid = cpu_to_node(new_cpu);
3055 
3056     if (!static_branch_likely(&sched_numa_balancing)) {
3057         return;
3058     }
3059 
3060     if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING)) {
3061         return;
3062     }
3063 
3064     if (src_nid == dst_nid) {
3065         return;
3066     }
3067 
3068     /*
3069      * Allow resets if faults have been trapped before one scan
3070      * has completed. This is most likely due to a new task that
3071      * is pulled cross-node due to wakeups or load balancing.
3072      */
3073     if (p->numa_scan_seq) {
3074         /*
3075          * Avoid scan adjustments if moving to the preferred
3076          * node or if the task was not previously running on
3077          * the preferred node.
3078          */
3079         if (dst_nid == p->numa_preferred_nid ||
3080             (p->numa_preferred_nid != NUMA_NO_NODE && src_nid != p->numa_preferred_nid)) {
3081             return;
3082         }
3083     }
3084 
3085     p->numa_scan_period = task_scan_start(p);
3086 }
3087 
3088 #else
3089 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
3090 {
3091 }
3092 
3093 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
3094 {
3095 }
3096 
3097 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
3098 {
3099 }
3100 
3101 static inline void update_scan_period(struct task_struct *p, int new_cpu)
3102 {
3103 }
3104 
3105 #endif /* CONFIG_NUMA_BALANCING */
3106 
3107 static void account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3108 {
3109     update_load_add(&cfs_rq->load, se->load.weight);
3110 #ifdef CONFIG_SMP
3111     if (entity_is_task(se)) {
3112         struct rq *rq = rq_of(cfs_rq);
3113 
3114         account_numa_enqueue(rq, task_of(se));
3115         list_add(&se->group_node, &rq->cfs_tasks);
3116     }
3117 #endif
3118     cfs_rq->nr_running++;
3119 }
3120 
3121 static void account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3122 {
3123     update_load_sub(&cfs_rq->load, se->load.weight);
3124 #ifdef CONFIG_SMP
3125     if (entity_is_task(se)) {
3126         account_numa_dequeue(rq_of(cfs_rq), task_of(se));
3127         list_del_init(&se->group_node);
3128     }
3129 #endif
3130     cfs_rq->nr_running--;
3131 }
3132 
3133 /*
3134  * Signed add and clamp on underflow.
3135  *
3136  * Explicitly do a load-store to ensure the intermediate value never hits
3137  * memory. This allows lockless observations without ever seeing the negative
3138  * values.
3139  */
3140 #define add_positive(_ptr, _val)                                                                                       \
3141     do {                                                                                                               \
3142         typeof(_ptr) ptr = (_ptr);                                                                                     \
3143         typeof(_val) val = (_val);                                                                                     \
3144         typeof(*ptr) res, var = READ_ONCE(*ptr);                                                                       \
3145                                                                                                                        \
3146         res = var + val;                                                                                               \
3147                                                                                                                        \
3148         if (val < 0 && res > var)                                                                                      \
3149             res = 0;                                                                                                   \
3150                                                                                                                        \
3151         WRITE_ONCE(*ptr, res);                                                                                         \
3152     } while (0)
3153 
3154 /*
3155  * Unsigned subtract and clamp on underflow.
3156  *
3157  * Explicitly do a load-store to ensure the intermediate value never hits
3158  * memory. This allows lockless observations without ever seeing the negative
3159  * values.
3160  */
3161 #define sub_positive(_ptr, _val)                                                                                       \
3162     do {                                                                                                               \
3163         typeof(_ptr) ptr = (_ptr);                                                                                     \
3164         typeof(*ptr) val = (_val);                                                                                     \
3165         typeof(*ptr) res, var = READ_ONCE(*ptr);                                                                       \
3166         res = var - val;                                                                                               \
3167         if (res > var)                                                                                                 \
3168             res = 0;                                                                                                   \
3169         WRITE_ONCE(*ptr, res);                                                                                         \
3170     } while (0)
3171 
3172 /*
3173  * Remove and clamp on negative, from a local variable.
3174  *
3175  * A variant of sub_positive(), which does not use explicit load-store
3176  * and is thus optimized for local variable updates.
3177  */
3178 #define lsub_positive(_ptr, _val)                                                                                      \
3179     do {                                                                                                               \
3180         typeof(_ptr) ptr = (_ptr);                                                                                     \
3181         *ptr -= min_t(typeof(*ptr), *ptr, _val);                                                                       \
3182     } while (0)
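
/*
 * Illustrative sketch of the three helpers above (an addition for clarity,
 * not part of the original code). Assume a 64-bit unsigned long field that
 * lockless readers may observe at any time:
 *
 *   unsigned long avg = 5;
 *   sub_positive(&avg, 8);   // avg == 0, never the wrapped value 2^64 - 3
 *   lsub_positive(&avg, 3);  // avg == 0; local variant, no READ/WRITE_ONCE
 *
 * A plain 'avg -= 8' would momentarily store the huge wrapped value, which
 * a concurrent lockless reader could observe.
 */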
3183 
3184 #ifdef CONFIG_SMP
3185 static inline void enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3186 {
3187     cfs_rq->avg.load_avg += se->avg.load_avg;
3188     cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
3189 }
3190 
3191 static inline void dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3192 {
3193     sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3194     sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
3195 }
3196 #else
3197 static inline void enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3198 {
3199 }
3200 static inline void dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3201 {
3202 }
3203 #endif
3204 
3205 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight)
3206 {
3207     if (se->on_rq) {
3208         /* commit outstanding execution time */
3209         if (cfs_rq->curr == se) {
3210             update_curr(cfs_rq);
3211         }
3212         update_load_sub(&cfs_rq->load, se->load.weight);
3213     }
3214     dequeue_load_avg(cfs_rq, se);
3215 
3216     update_load_set(&se->load, weight);
3217 
3218 #ifdef CONFIG_SMP
3219     {
3220         u32 divider = get_pelt_divider(&se->avg);
3221 
3222         se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
3223     }
3224 #endif
3225 
3226     enqueue_load_avg(cfs_rq, se);
3227     if (se->on_rq) {
3228         update_load_add(&cfs_rq->load, se->load.weight);
3229     }
3230 }
3231 
3232 void reweight_task(struct task_struct *p, int prio)
3233 {
3234     struct sched_entity *se = &p->se;
3235     struct cfs_rq *cfs_rq = cfs_rq_of(se);
3236     struct load_weight *load = &se->load;
3237     unsigned long weight = scale_load(sched_prio_to_weight[prio]);
3238 
3239     reweight_entity(cfs_rq, se, weight);
3240     load->inv_weight = sched_prio_to_wmult[prio];
3241 }
3242 
3243 #ifdef CONFIG_FAIR_GROUP_SCHED
3244 #ifdef CONFIG_SMP
3245 /*
3246  * All this does is approximate the hierarchical proportion which includes that
3247  * global sum we all love to hate.
3248  *
3249  * That is, the weight of a group entity, is the proportional share of the
3250  * group weight based on the group runqueue weights. That is:
3251  *
3252  *                     tg->weight * grq->load.weight
3253  *   ge->load.weight = -----------------------------               (1)
3254  *                       \Sum grq->load.weight
3255  *
3256  * Now, because computing that sum is prohibitively expensive (been
3257  * there, done that) we approximate it with this average stuff. The average
3258  * moves slower and therefore the approximation is cheaper and more stable.
3259  *
3260  * So instead of the above, we substitute:
3261  *
3262  *   grq->load.weight -> grq->avg.load_avg                         (2)
3263  *
3264  * which yields the following
3265  *
3266  *                     tg->weight * grq->avg.load_avg
3267  *   ge->load.weight = ------------------------------              (3)
3268  *                             tg->load_avg
3269  *
3270  * Where: tg->load_avg ~= \Sum grq->avg.load_avg
3271  *
3272  * That is shares_avg, and it is right (given the approximation (2)).
3273  *
3274  * The problem with it is that because the average is slow -- it was designed
3275  * to be exactly that of course -- this leads to transients in boundary
3276  * conditions. In specific, the case where the group was idle and we start the
3277  * one task. It takes time for our CPU's grq->avg.load_avg to build up,
3278  * yielding bad latency etc..
3279  *
3280  * Now, in that special case (1) reduces to:
3281  *
3282  *                     tg->weight * grq->load.weight
3283  *   ge->load.weight = ----------------------------- = tg->weight   (4)
3284  *                         grq->load.weight
3285  *
3286  * That is, the sum collapses because all other CPUs are idle; the UP scenario.
3287  *
3288  * So what we do is modify our approximation (3) to approach (4) in the (near)
3289  * UP case, like
3290  *
3291  *   ge->load.weight =
3292  *
3293  *              tg->weight * grq->load.weight
3294  *     ---------------------------------------------------         (5)
3295  *     tg->load_avg - grq->avg.load_avg + grq->load.weight
3296  *
3297  * But because grq->load.weight can drop to 0, resulting in a divide by zero,
3298  * we need to use grq->avg.load_avg as its lower bound, which then gives:
3299  *
3300  *
3301  *                     tg->weight * grq->load.weight
3302  *   ge->load.weight = -----------------------------           (6)
3303  *                             tg_load_avg'
3304  *
3305  * Where
3306  *
3307  *   tg_load_avg' = tg->load_avg - grq->avg.load_avg +
3308  *                  max(grq->load.weight, grq->avg.load_avg)
3309  *
3310  * And that is shares_weight and is icky. In the (near) UP case it approaches
3311  * (4) while in the normal case it approaches (3). It consistently
3312  * overestimates the ge->load.weight and therefore:
3313  *
3314  *   \Sum ge->load.weight >= tg->weight
3315  *
3316  * hence icky!
3317  */
3318 static long calc_group_shares(struct cfs_rq *cfs_rq)
3319 {
3320     long tg_weight, tg_shares, load, shares;
3321     struct task_group *tg = cfs_rq->tg;
3322 
3323     tg_shares = READ_ONCE(tg->shares);
3324 
3325     load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
3326 
3327     tg_weight = atomic_long_read(&tg->load_avg);
3328 
3329     /* Ensure tg_weight >= load */
3330     tg_weight -= cfs_rq->tg_load_avg_contrib;
3331     tg_weight += load;
3332 
3333     shares = (tg_shares * load);
3334     if (tg_weight) {
3335         shares /= tg_weight;
3336     }
3337 
3338     /*
3339      * MIN_SHARES has to be unscaled here to support per-CPU partitioning
3340      * of a group with small tg->shares value. It is a floor value which is
3341      * assigned as a minimum load.weight to the sched_entity representing
3342      * the group on a CPU.
3343      *
3344      * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
3345      * on an 8-core system with 8 tasks each runnable on one CPU shares has
3346      * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In
3347      * case no task is runnable on a CPU MIN_SHARES=2 should be returned
3348      * instead of 0.
3349      */
3350     return clamp_t(long, shares, MIN_SHARES, tg_shares);
3351 }
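
/*
 * Worked example of (6) above (illustrative numbers, values taken as
 * already scaled; not from the original code): tg->shares = 1024, this
 * CPU's grq->load.weight = 512, grq->avg.load_avg = 256, tg->load_avg =
 * 1024 and tg_load_avg_contrib = 256:
 *
 *   load      = max(512, 256)     = 512
 *   tg_weight = 1024 - 256 + 512  = 1280
 *   shares    = 1024 * 512 / 1280 = 409
 *
 * which is then clamped to [MIN_SHARES, tg->shares] by the return above.
 */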
3352 #endif /* CONFIG_SMP */
3353 
3354 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
3355 
3356 /*
3357  * Recomputes the group entity based on the current state of its group
3358  * runqueue.
3359  */
3360 static void update_cfs_group(struct sched_entity *se)
3361 {
3362     struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3363     long shares;
3364 
3365     if (!gcfs_rq) {
3366         return;
3367     }
3368 
3369     if (throttled_hierarchy(gcfs_rq)) {
3370         return;
3371     }
3372 
3373 #ifndef CONFIG_SMP
3374     shares = READ_ONCE(gcfs_rq->tg->shares);
3375     if (likely(se->load.weight == shares)) {
3376         return;
3377     }
3378 #else
3379     shares = calc_group_shares(gcfs_rq);
3380 #endif
3381 
3382     reweight_entity(cfs_rq_of(se), se, shares);
3383 }
3384 
3385 #else  /* CONFIG_FAIR_GROUP_SCHED */
3386 static inline void update_cfs_group(struct sched_entity *se)
3387 {
3388 }
3389 #endif /* CONFIG_FAIR_GROUP_SCHED */
3390 
3391 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
3392 {
3393     struct rq *rq = rq_of(cfs_rq);
3394 
3395     if (&rq->cfs == cfs_rq) {
3396         /*
3397          * There are a few boundary cases this might miss but it should
3398          * get called often enough that that should (hopefully) not be
3399          * a real problem.
3400          *
3401          * It will not get called when we go idle, because the idle
3402          * thread is a different class (!fair), nor will the utilization
3403          * number include things like RT tasks.
3404          *
3405          * As is, the util number is not freq-invariant (we'd have to
3406          * implement arch_scale_freq_capacity() for that).
3407          *
3408          * See cpu_util().
3409          */
3410         cpufreq_update_util(rq, flags);
3411     }
3412 }
3413 
3414 #ifdef CONFIG_SMP
3415 #ifdef CONFIG_FAIR_GROUP_SCHED
3416 /**
3417  * update_tg_load_avg - update the tg's load avg
3418  * @cfs_rq: the cfs_rq whose avg changed
3419  *
3420  * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
3421  * However, because tg->load_avg is a global value there are performance
3422  * considerations.
3423  *
3424  * In order to avoid having to look at the other cfs_rq's, we use a
3425  * differential update where we store the last value we propagated. This in
3426  * turn allows skipping updates if the differential is 'small'.
3427  *
3428  * Updating tg's load_avg is necessary before update_cfs_share().
3429  */
3430 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
3431 {
3432     long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
3433 
3434     /*
3435      * No need to update load_avg for root_task_group as it is not used.
3436      */
3437     if (cfs_rq->tg == &root_task_group) {
3438         return;
3439     }
3440 
3441     if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
3442         atomic_long_add(delta, &cfs_rq->tg->load_avg);
3443         cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
3444     }
3445 }
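
/*
 * Worked example of the differential update above (illustrative): with a
 * cached tg_load_avg_contrib of 640, the shared atomic tg->load_avg is only
 * touched once |delta| exceeds 640 / 64 = 10, which bounds cross-CPU
 * traffic on that cacheline.
 */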
3446 
3447 /*
3448  * Called within set_task_rq() right before setting a task's CPU. The
3449  * caller only guarantees p->pi_lock is held; no other assumptions,
3450  * including the state of rq->lock, should be made.
3451  */
3452 void set_task_rq_fair(struct sched_entity *se, struct cfs_rq *prev, struct cfs_rq *next)
3453 {
3454     u64 p_last_update_time;
3455     u64 n_last_update_time;
3456 
3457     if (!sched_feat(ATTACH_AGE_LOAD)) {
3458         return;
3459     }
3460 
3461     /*
3462      * We are supposed to update the task to "current" time, so that it is up to
3463      * date and ready to go to the new CPU/cfs_rq. But we have difficulty in
3464      * getting what the current time is, so simply throw away the out-of-date
3465      * time. This will result in the wakee task being less decayed, but giving
3466      * the wakee more load does not sound bad.
3467      */
3468     if (!(se->avg.last_update_time && prev)) {
3469         return;
3470     }
3471 
3472 #ifndef CONFIG_64BIT
3473     {
3474         u64 p_last_update_time_copy;
3475         u64 n_last_update_time_copy;
3476 
3477         do {
3478             p_last_update_time_copy = prev->load_last_update_time_copy;
3479             n_last_update_time_copy = next->load_last_update_time_copy;
3480 
3481             smp_rmb();
3482 
3483             p_last_update_time = prev->avg.last_update_time;
3484             n_last_update_time = next->avg.last_update_time;
3485         } while (p_last_update_time != p_last_update_time_copy || n_last_update_time != n_last_update_time_copy);
3486     }
3487 #else
3488     p_last_update_time = prev->avg.last_update_time;
3489     n_last_update_time = next->avg.last_update_time;
3490 #endif
3491     __update_load_avg_blocked_se(p_last_update_time, se);
3492     se->avg.last_update_time = n_last_update_time;
3493 }
3494 
3495 /*
3496  * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
3497  * propagate its contribution. The key to this propagation is the invariant
3498  * that for each group
3499  *
3500  *   ge->avg == grq->avg                        (1)
3501  *
3502  * _IFF_ we look at the pure running and runnable sums. Because they
3503  * represent the very same entity, just at different points in the hierarchy.
3504  *
3505  * Per the above, update_tg_cfs_util() and update_tg_cfs_runnable() are trivial
3506  * and simply copy the running/runnable sum over (but still wrong, because
3507  * the group entity and group rq do not have their PELT windows aligned).
3508  *
3509  * However, update_tg_cfs_load() is more complex. So we have:
3510  *
3511  *   ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg        (2)
3512  *
3513  * And since, like util, the runnable part should be directly transferable,
3514  * the following would _appear_ to be the straightforward approach:
3515  *
3516  *   grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg    (3)
3517  *
3518  * And per (1) we have
3519  *
3520  *   ge->avg.runnable_avg == grq->avg.runnable_avg
3521  *
3522  * Which gives
3523  *
3524  *                      ge->load.weight * grq->avg.load_avg
3525  *   ge->avg.load_avg = -----------------------------------        (4)
3526  *                               grq->load.weight
3527  *
3528  * Except that is wrong!
3529  *
3530  * Because while for entities historical weight is not important and we
3531  * really only care about our future and therefore can consider a pure
3532  * runnable sum, runqueues can NOT do this.
3533  *
3534  * We specifically want runqueues to have a load_avg that includes
3535  * historical weights. Those represent the blocked load, the load we expect
3536  * to (shortly) return to us. This only works by keeping the weights as
3537  * integral part of the sum. We therefore cannot decompose as per (3).
3538  *
3539  * Another reason this doesn't work is that runnable isn't a 0-sum entity.
3540  * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
3541  * rq itself is runnable anywhere between 2/3 and 1 depending on how the
3542  * runnable section of these tasks overlap (or not). If they were to perfectly
3543  * align the rq as a whole would be runnable 2/3 of the time. If however we
3544  * always have at least 1 runnable task, the rq as a whole is always runnable.
3545  *
3546  * So we'll have to approximate.. :/
3547  *
3548  * Given the constraint
3549  *
3550  *   ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
3551  *
3552  * We can construct a rule that adds runnable to a rq by assuming minimal
3553  * overlap.
3554  *
3555  * On removal, we'll assume each task is equally runnable, which yields:
3556  *
3557  *   grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
3558  *
3559  * XXX: only do this for the part of runnable > running ?
3560  *
3561  */
3562 
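/*
 * Worked example of the overlap problem above (illustrative): two tasks,
 * each runnable 2/3 of the time, on one rq. If their runnable sections
 * align perfectly, the rq is runnable 2/3 of the time; if they never
 * overlap, it is runnable min(2/3 + 2/3, 1) = all of the time. The truth
 * lies in between, hence the minimal-overlap rule on addition and the
 * equal-runnability assumption on removal.
 */
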
3563 static inline void update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
3564 {
3565     long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
3566     u32 divider;
3567 
3568     /* Nothing to update */
3569     if (!delta) {
3570         return;
3571     }
3572 
3573     /*
3574      * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3575      * See ___update_load_avg() for details.
3576      */
3577     divider = get_pelt_divider(&cfs_rq->avg);
3578 
3579     /* Set new sched_entity's utilization */
3580     se->avg.util_avg = gcfs_rq->avg.util_avg;
3581     se->avg.util_sum = se->avg.util_avg * divider;
3582 
3583     /* Update parent cfs_rq utilization */
3584     add_positive(&cfs_rq->avg.util_avg, delta);
3585     cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
3586 }
3587 
3588 static inline void update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
3589 {
3590     long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
3591     u32 divider;
3592 
3593     /* Nothing to update */
3594     if (!delta) {
3595         return;
3596     }
3597 
3598     /*
3599      * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3600      * See ___update_load_avg() for details.
3601      */
3602     divider = get_pelt_divider(&cfs_rq->avg);
3603 
3604     /* Set new sched_entity's runnable */
3605     se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
3606     se->avg.runnable_sum = se->avg.runnable_avg * divider;
3607 
3608     /* Update parent cfs_rq runnable */
3609     add_positive(&cfs_rq->avg.runnable_avg, delta);
3610     cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
3611 }
3612 
3613 static inline void update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
3614 {
3615     long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
3616     unsigned long load_avg;
3617     u64 load_sum = 0;
3618     u32 divider;
3619 
3620     if (!runnable_sum) {
3621         return;
3622     }
3623 
3624     gcfs_rq->prop_runnable_sum = 0;
3625 
3626     /*
3627      * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3628      * See ___update_load_avg() for details.
3629      */
3630     divider = get_pelt_divider(&cfs_rq->avg);
3631 
3632     if (runnable_sum >= 0) {
3633         /*
3634          * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
3635          * the CPU is saturated running == runnable.
3636          */
3637         runnable_sum += se->avg.load_sum;
3638         runnable_sum = min_t(long, runnable_sum, divider);
3639     } else {
3640         /*
3641          * Estimate the new unweighted runnable_sum of the gcfs_rq by
3642          * assuming all tasks are equally runnable.
3643          */
3644         if (scale_load_down(gcfs_rq->load.weight)) {
3645             load_sum = div_s64(gcfs_rq->avg.load_sum, scale_load_down(gcfs_rq->load.weight));
3646         }
3647 
3648         /* But make sure to not inflate se's runnable */
3649         runnable_sum = min(se->avg.load_sum, load_sum);
3650     }
3651 
3652     /*
3653      * runnable_sum can't be lower than running_sum
3654      * Rescale running sum to be in the same range as runnable sum
3655      * running_sum is in [0 : LOAD_AVG_MAX <<  SCHED_CAPACITY_SHIFT]
3656      * runnable_sum is in [0 : LOAD_AVG_MAX]
3657      */
3658     running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
3659     runnable_sum = max(runnable_sum, running_sum);
3660 
3661     load_sum = (s64)se_weight(se) * runnable_sum;
3662     load_avg = div_s64(load_sum, divider);
3663 
3664     delta = load_avg - se->avg.load_avg;
3665 
3666     se->avg.load_sum = runnable_sum;
3667     se->avg.load_avg = load_avg;
3668 
3669     add_positive(&cfs_rq->avg.load_avg, delta);
3670     cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
3671 }
3672 
3673 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
3674 {
3675     cfs_rq->propagate = 1;
3676     cfs_rq->prop_runnable_sum += runnable_sum;
3677 }
3678 
3679 /* Update task and its cfs_rq load average */
3680 static inline int propagate_entity_load_avg(struct sched_entity *se)
3681 {
3682     struct cfs_rq *cfs_rq, *gcfs_rq;
3683 
3684     if (entity_is_task(se)) {
3685         return 0;
3686     }
3687 
3688     gcfs_rq = group_cfs_rq(se);
3689     if (!gcfs_rq->propagate) {
3690         return 0;
3691     }
3692 
3693     gcfs_rq->propagate = 0;
3694 
3695     cfs_rq = cfs_rq_of(se);
3696 
3697     add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
3698 
3699     update_tg_cfs_util(cfs_rq, se, gcfs_rq);
3700     update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
3701     update_tg_cfs_load(cfs_rq, se, gcfs_rq);
3702 
3703     trace_pelt_cfs_tp(cfs_rq);
3704     trace_pelt_se_tp(se);
3705 
3706     return 1;
3707 }
3708 
3709 /*
3710  * Check if we need to update the load and the utilization of a blocked
3711  * group_entity
3712  */
3713 static inline bool skip_blocked_update(struct sched_entity *se)
3714 {
3715     struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3716 
3717     /*
3718      * If the sched_entity still has non-zero load or utilization, we have to
3719      * decay it:
3720      */
3721     if (se->avg.load_avg || se->avg.util_avg) {
3722         return false;
3723     }
3724 
3725     /*
3726      * If there is a pending propagation, we have to update the load and
3727      * the utilization of the sched_entity:
3728      */
3729     if (gcfs_rq->propagate) {
3730         return false;
3731     }
3732 
3733     /*
3734      * Otherwise, the load and the utilization of the sched_entity are
3735      * already zero and there is no pending propagation, so it will be a
3736      * waste of time to try to decay it:
3737      */
3738     return true;
3739 }
3740 
3741 #else /* CONFIG_FAIR_GROUP_SCHED */
3742 
3743 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
3744 {
3745 }
3746 
3747 static inline int propagate_entity_load_avg(struct sched_entity *se)
3748 {
3749     return 0;
3750 }
3751 
3752 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
3753 {
3754 }
3755 
3756 #endif /* CONFIG_FAIR_GROUP_SCHED */
3757 
3758 /**
3759  * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
3760  * @now: current time, as per cfs_rq_clock_pelt()
3761  * @cfs_rq: cfs_rq to update
3762  *
3763  * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
3764  * avg. The immediate corollary is that all (fair) tasks must be attached, see
3765  * post_init_entity_util_avg().
3766  *
3767  * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
3768  *
3769  * Returns true if the load decayed or we removed load.
3770  *
3771  * Since both these conditions indicate a changed cfs_rq->avg.load we should
3772  * call update_tg_load_avg() when this function returns true.
3773  */
3774 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
3775 {
3776     unsigned long removed_load = 0, removed_util = 0, removed_runnable = 0;
3777     struct sched_avg *sa = &cfs_rq->avg;
3778     int decayed = 0;
3779 
3780     if (cfs_rq->removed.nr) {
3781         unsigned long r;
3782         u32 divider = get_pelt_divider(&cfs_rq->avg);
3783 
3784         raw_spin_lock(&cfs_rq->removed.lock);
3785         swap(cfs_rq->removed.util_avg, removed_util);
3786         swap(cfs_rq->removed.load_avg, removed_load);
3787         swap(cfs_rq->removed.runnable_avg, removed_runnable);
3788         cfs_rq->removed.nr = 0;
3789         raw_spin_unlock(&cfs_rq->removed.lock);
3790 
3791         r = removed_load;
3792         sub_positive(&sa->load_avg, r);
3793         sa->load_sum = sa->load_avg * divider;
3794 
3795         r = removed_util;
3796         sub_positive(&sa->util_avg, r);
3797         sub_positive(&sa->util_sum, r * divider);
3798         /*
3799          * Because of rounding, se->util_sum might end up being +1 more than
3800          * cfs->util_sum. Although this is not a problem by itself, detaching
3801          * a lot of tasks with the rounding problem between 2 updates of
3802          * util_avg (~1ms) can make cfs->util_sum become zero while
3803          * cfs->util_avg is not.
3804          * Check that util_sum is still above its lower bound for the new
3805          * util_avg. Given that period_contrib might have moved since the last
3806          * sync, we are only sure that util_sum must be above or equal to
3807          *    util_avg * minimum possible divider
3808          */
3809         sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);
3810 
3811         r = removed_runnable;
3812         sub_positive(&sa->runnable_avg, r);
3813         sa->runnable_sum = sa->runnable_avg * divider;
3814 
3815         /*
3816          * removed_runnable is the unweighted version of removed_load so we
3817          * can use it to estimate removed_load_sum.
3818          */
3819         add_tg_cfs_propagate(cfs_rq, -(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT);
3820 
3821         decayed = 1;
3822     }
3823 
3824     decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
3825 
3826 #ifndef CONFIG_64BIT
3827     smp_wmb();
3828     cfs_rq->load_last_update_time_copy = sa->last_update_time;
3829 #endif
3830 
3831     return decayed;
3832 }
3833 
3834 /**
3835  * attach_entity_load_avg - attach this entity to its cfs_rq load avg
3836  * @cfs_rq: cfs_rq to attach to
3837  * @se: sched_entity to attach
3838  *
3839  * Must call update_cfs_rq_load_avg() before this, since we rely on
3840  * cfs_rq->avg.last_update_time being current.
3841  */
3842 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3843 {
3844     /*
3845      * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3846      * See ___update_load_avg() for details.
3847      */
3848     u32 divider = get_pelt_divider(&cfs_rq->avg);
3849 
3850     /*
3851      * When we attach the @se to the @cfs_rq, we must align the decay
3852      * window because without that, really weird and wonderful things can
3853      * happen.
3854      *
3855      * XXX illustrate
3856      */
3857     se->avg.last_update_time = cfs_rq->avg.last_update_time;
3858     se->avg.period_contrib = cfs_rq->avg.period_contrib;
3859 
3860     /*
3861      * Hell(o) Nasty stuff.. we need to recompute _sum based on the new
3862      * period_contrib. This isn't strictly correct, but since we're
3863      * entirely outside of the PELT hierarchy, nobody cares if we truncate
3864      * _sum a little.
3865      */
3866     se->avg.util_sum = se->avg.util_avg * divider;
3867 
3868     se->avg.runnable_sum = se->avg.runnable_avg * divider;
3869 
3870     se->avg.load_sum = se->avg.load_avg * divider;
3871     if (se_weight(se) < se->avg.load_sum)
3872         se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
3873     else
3874         se->avg.load_sum = 1;
3875 
3876     enqueue_load_avg(cfs_rq, se);
3877     cfs_rq->avg.util_avg += se->avg.util_avg;
3878     cfs_rq->avg.util_sum += se->avg.util_sum;
3879     cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
3880     cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
3881 
3882     add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
3883 
3884     cfs_rq_util_change(cfs_rq, 0);
3885 
3886     trace_pelt_cfs_tp(cfs_rq);
3887 }
3888 
3889 /**
3890  * detach_entity_load_avg - detach this entity from its cfs_rq load avg
3891  * @cfs_rq: cfs_rq to detach from
3892  * @se: sched_entity to detach
3893  *
3894  * Must call update_cfs_rq_load_avg() before this, since we rely on
3895  * cfs_rq->avg.last_update_time being current.
3896  */
3897 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3898 {
3899     /*
3900      * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3901      * See ___update_load_avg() for details.
3902      */
3903     u32 divider = get_pelt_divider(&cfs_rq->avg);
3904 
3905     dequeue_load_avg(cfs_rq, se);
3906     sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3907     cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
3908     sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
3909     cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
3910 
3911     add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
3912 
3913     cfs_rq_util_change(cfs_rq, 0);
3914 
3915     trace_pelt_cfs_tp(cfs_rq);
3916 }
3917 
3918 /*
3919  * Optional action to be done while updating the load average
3920  */
3921 #define UPDATE_TG 0x1
3922 #define SKIP_AGE_LOAD 0x2
3923 #define DO_ATTACH 0x4
3924 
3925 /* Update task and its cfs_rq load average */
3926 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3927 {
3928     u64 now = cfs_rq_clock_pelt(cfs_rq);
3929     int decayed;
3930 
3931     /*
3932      * Track task load average for carrying it to new CPU after migrated, and
3933      * track group sched_entity load average for task_h_load calc in migration
3934      */
3935     if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) {
3936         __update_load_avg_se(now, cfs_rq, se);
3937     }
3938 
3939     decayed = update_cfs_rq_load_avg(now, cfs_rq);
3940     decayed |= propagate_entity_load_avg(se);
3941 
3942     if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
3943         /*
3944          * DO_ATTACH means we're here from enqueue_entity().
3945          * !last_update_time means we've passed through
3946          * migrate_task_rq_fair() indicating we migrated.
3947          *
3948          * IOW we're enqueueing a task on a new CPU.
3949          */
3950         attach_entity_load_avg(cfs_rq, se);
3951         update_tg_load_avg(cfs_rq);
3952     } else if (decayed) {
3953         cfs_rq_util_change(cfs_rq, 0);
3954 
3955         if (flags & UPDATE_TG) {
3956             update_tg_load_avg(cfs_rq);
3957         }
3958     }
3959 }
3960 
3961 #ifndef CONFIG_64BIT
3962 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3963 {
3964     u64 last_update_time_copy;
3965     u64 last_update_time;
3966 
3967     do {
3968         last_update_time_copy = cfs_rq->load_last_update_time_copy;
3969         smp_rmb();
3970         last_update_time = cfs_rq->avg.last_update_time;
3971     } while (last_update_time != last_update_time_copy);
3972 
3973     return last_update_time;
3974 }
3975 #else
3976 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3977 {
3978     return cfs_rq->avg.last_update_time;
3979 }
3980 #endif
3981 
3982 /*
3983  * Synchronize entity load avg of dequeued entity without locking
3984  * the previous rq.
3985  */
3986 static void sync_entity_load_avg(struct sched_entity *se)
3987 {
3988     struct cfs_rq *cfs_rq = cfs_rq_of(se);
3989     u64 last_update_time;
3990 
3991     last_update_time = cfs_rq_last_update_time(cfs_rq);
3992     __update_load_avg_blocked_se(last_update_time, se);
3993 }
3994 
3995 /*
3996  * Task first catches up with cfs_rq, and then subtract
3997  * itself from the cfs_rq (task must be off the queue now).
3998  */
3999 static void remove_entity_load_avg(struct sched_entity *se)
4000 {
4001     struct cfs_rq *cfs_rq = cfs_rq_of(se);
4002     unsigned long flags;
4003 
4004     /*
4005      * tasks cannot exit without having gone through wake_up_new_task() ->
4006      * post_init_entity_util_avg() which will have added things to the
4007      * cfs_rq, so we can remove unconditionally.
4008      */
4009 
4010     sync_entity_load_avg(se);
4011 
4012     raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
4013     ++cfs_rq->removed.nr;
4014     cfs_rq->removed.util_avg += se->avg.util_avg;
4015     cfs_rq->removed.load_avg += se->avg.load_avg;
4016     cfs_rq->removed.runnable_avg += se->avg.runnable_avg;
4017     raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
4018 }
4019 
4020 static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq)
4021 {
4022     return cfs_rq->avg.runnable_avg;
4023 }
4024 
4025 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
4026 {
4027     return cfs_rq->avg.load_avg;
4028 }
4029 
4030 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
4031 
4032 static inline unsigned long task_util(struct task_struct *p)
4033 {
4034 #ifdef CONFIG_SCHED_WALT
4035     if (likely(!walt_disabled && sysctl_sched_use_walt_task_util)) {
4036         return p->ravg.demand_scaled;
4037     }
4038 #endif
4039     return READ_ONCE(p->se.avg.util_avg);
4040 }
4041 
4042 static inline unsigned long _task_util_est(struct task_struct *p)
4043 {
4044     struct util_est ue = READ_ONCE(p->se.avg.util_est);
4045 
4046     return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
4047 }
4048 
4049 static inline unsigned long task_util_est(struct task_struct *p)
4050 {
4051 #ifdef CONFIG_SCHED_WALT
4052     if (likely(!walt_disabled && sysctl_sched_use_walt_task_util)) {
4053         return p->ravg.demand_scaled;
4054     }
4055 #endif
4056     return max(task_util(p), _task_util_est(p));
4057 }
4058 
4059 #ifdef CONFIG_UCLAMP_TASK
4060 #ifdef CONFIG_SCHED_RT_CAS
4061 unsigned long uclamp_task_util(struct task_struct *p)
4062 #else
4063 static inline unsigned long uclamp_task_util(struct task_struct *p)
4064 #endif
4065 {
4066     return clamp(task_util_est(p), uclamp_eff_value(p, UCLAMP_MIN), uclamp_eff_value(p, UCLAMP_MAX));
4067 }
4068 #else
4069 #ifdef CONFIG_SCHED_RT_CAS
4070 unsigned long uclamp_task_util(struct task_struct *p)
4071 #else
4072 static inline unsigned long uclamp_task_util(struct task_struct *p)
4073 #endif
4074 {
4075     return task_util_est(p);
4076 }
4077 #endif
4078 
4079 static inline void util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p)
4080 {
4081     unsigned int enqueued;
4082 
4083     if (!sched_feat(UTIL_EST)) {
4084         return;
4085     }
4086 
4087     /* Update root cfs_rq's estimated utilization */
4088     enqueued = cfs_rq->avg.util_est.enqueued;
4089     enqueued += _task_util_est(p);
4090     WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
4091 
4092     trace_sched_util_est_cfs_tp(cfs_rq);
4093 }
4094 
4095 static inline void util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p)
4096 {
4097     unsigned int enqueued;
4098 
4099     if (!sched_feat(UTIL_EST)) {
4100         return;
4101     }
4102 
4103     /* Update root cfs_rq's estimated utilization */
4104     enqueued = cfs_rq->avg.util_est.enqueued;
4105     enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
4106     WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
4107 
4108     trace_sched_util_est_cfs_tp(cfs_rq);
4109 }
4110 
4111 #define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / FAIR_ONEHUNDRED)
4112 
4113 /*
4114  * Check if a (signed) value is within a specified (unsigned) margin,
4115  * based on the observation that
4116  *
4117  *     abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
4118  *
4119  * NOTE: this only works when value + margin < INT_MAX.
4120  */
4121 static inline bool within_margin(int value, int margin)
4122 {
4123     return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
4124 }
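
/*
 * Sanity check of the trick above (illustrative): value = -5, margin = 10
 * gives (unsigned)(-5 + 10 - 1) = 4 < (2 * 10 - 1) = 19, so abs(-5) < 10
 * holds. value = 12 gives 21 >= 19 and is rejected; a large negative value
 * wraps to a huge unsigned number and is rejected as well.
 */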
4125 
4126 static inline void util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
4127 {
4128     long last_ewma_diff, last_enqueued_diff;
4129     struct util_est ue;
4130 
4131     if (!sched_feat(UTIL_EST)) {
4132         return;
4133     }
4134 
4135     /*
4136      * Skip update of task's estimated utilization when the task has not
4137      * yet completed an activation, e.g. being migrated.
4138      */
4139     if (!task_sleep) {
4140         return;
4141     }
4142 
4143     /*
4144      * If the PELT values haven't changed since enqueue time,
4145      * skip the util_est update.
4146      */
4147     ue = p->se.avg.util_est;
4148     if (ue.enqueued & UTIL_AVG_UNCHANGED) {
4149         return;
4150     }
4151 
4152     last_enqueued_diff = ue.enqueued;
4153 
4154     /*
4155      * Reset EWMA on utilization increases, the moving average is used only
4156      * to smooth utilization decreases.
4157      */
4158     ue.enqueued = task_util(p);
4159     if (sched_feat(UTIL_EST_FASTUP)) {
4160         if (ue.ewma < ue.enqueued) {
4161             ue.ewma = ue.enqueued;
4162             goto done;
4163         }
4164     }
4165 
4166     /*
4167      * Skip update of task's estimated utilization when its members are
4168      * already within ~1% of the last activation value.
4169      */
4170     last_ewma_diff = ue.enqueued - ue.ewma;
4171     last_enqueued_diff -= ue.enqueued;
4172     if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
4173         if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN)) {
4174             goto done;
4175         }
4176 
4177         return;
4178     }
4179 
4180     /*
4181      * To avoid overestimation of actual task utilization, skip updates if
4182      * we cannot guarantee there is idle time on this CPU.
4183      */
4184     if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq)))) {
4185         return;
4186     }
4187 
4188     /*
4189      * Update Task's estimated utilization
4190      *
4191      * When *p completes an activation we can consolidate another sample
4192      * of the task size. This is done by storing the current PELT value
4193      * as ue.enqueued and by using this value to update the Exponential
4194      * Weighted Moving Average (EWMA):
4195      *
4196      *  ewma(t) = w *  task_util(p) + (1-w) * ewma(t-1)
4197      *          = w *  task_util(p) +         ewma(t-1)  - w * ewma(t-1)
4198      *          = w * (task_util(p) -         ewma(t-1)) +     ewma(t-1)
4199      *          = w * (      last_ewma_diff            ) +     ewma(t-1)
4200      *          = w * (last_ewma_diff  +  ewma(t-1) / w)
4201      *
4202      * Where 'w' is the weight of new samples, which is configured to be
4203      * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT)
4204      */
4205     ue.ewma <<= UTIL_EST_WEIGHT_SHIFT;
4206     ue.ewma += last_ewma_diff;
4207     ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
4208 done:
4209     ue.enqueued |= UTIL_AVG_UNCHANGED;
4210     WRITE_ONCE(p->se.avg.util_est, ue);
4211 
4212     trace_sched_util_est_se_tp(&p->se);
4213 }
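
/*
 * Worked example of the EWMA step above (illustrative): with
 * UTIL_EST_WEIGHT_SHIFT = 2 (w = 1/4) and ewma(t-1) = 100, a new sample
 * task_util(p) = 60 yields last_ewma_diff = -40 and
 *
 *   ewma(t) = ((100 << 2) - 40) >> 2 = 360 >> 2 = 90
 *
 * matching 0.25 * 60 + 0.75 * 100. Decreases are smoothed; increases reset
 * the EWMA immediately when UTIL_EST_FASTUP is set.
 */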
4214 
4215 static inline int task_fits_capacity(struct task_struct *p, long capacity)
4216 {
4217     return fits_capacity(uclamp_task_util(p), capacity);
4218 }
4219 
4220 #ifdef CONFIG_SCHED_RTG
4221 bool task_fits_max(struct task_struct *p, int cpu)
4222 {
4223     unsigned long capacity = capacity_orig_of(cpu);
4224     unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity;
4225     if (capacity == max_capacity) {
4226         return true;
4227     }
4228 
4229     return task_fits_capacity(p, capacity);
4230 }
4231 #endif
4232 
4233 static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
4234 {
4235     bool task_fits = false;
4236 #ifdef CONFIG_SCHED_RTG
4237     int cpu = cpu_of(rq);
4238     struct cpumask *rtg_target = NULL;
4239 #endif
4240 
4241     if (!static_branch_unlikely(&sched_asym_cpucapacity)) {
4242         return;
4243     }
4244 
4245     if (!p || p->nr_cpus_allowed == 1) {
4246         rq->misfit_task_load = 0;
4247         return;
4248     }
4249 
4250 #ifdef CONFIG_SCHED_RTG
4251     rtg_target = find_rtg_target(p);
4252     if (rtg_target) {
4253         task_fits = capacity_orig_of(cpu) >= capacity_orig_of(cpumask_first(rtg_target));
4254     } else {
4255         task_fits = task_fits_capacity(p, capacity_of(cpu_of(rq)));
4256     }
4257 #else
4258     task_fits = task_fits_capacity(p, capacity_of(cpu_of(rq)));
4259 #endif
4260     if (task_fits) {
4261         rq->misfit_task_load = 0;
4262         return;
4263     }
4264 
4265     /*
4266      * Make sure that misfit_task_load will not be null even if
4267      * task_h_load() returns 0.
4268      */
4269     rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
4270 }
4271 
4272 #else /* CONFIG_SMP */
4273 
4274 #define UPDATE_TG 0x0
4275 #define SKIP_AGE_LOAD 0x0
4276 #define DO_ATTACH 0x0
4277 
4278 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
4279 {
4280     cfs_rq_util_change(cfs_rq, 0);
4281 }
4282 
4283 static inline void remove_entity_load_avg(struct sched_entity *se)
4284 {
4285 }
4286 
4287 static inline void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
4288 {
4289 }
4290 static inline void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
4291 {
4292 }
4293 
4294 static inline int newidle_balance(struct rq *rq, struct rq_flags *rf)
4295 {
4296     return 0;
4297 }
4298 
4299 static inline void util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p)
4300 {
4301 }
4302 
4303 static inline void util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p)
4304 {
4305 }
4306 
4307 static inline void util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
4308 {
4309 }
4310 static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
4311 {
4312 }
4313 
4314 #endif /* CONFIG_SMP */
4315 
4316 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
4317 {
4318 #ifdef CONFIG_SCHED_DEBUG
4319     s64 d = se->vruntime - cfs_rq->min_vruntime;
4320 
4321     if (d < 0) {
4322         d = -d;
4323     }
4324 
4325     if (d > 3 * sysctl_sched_latency) {
4326         schedstat_inc(cfs_rq->nr_spread_over);
4327     }
4328 #endif
4329 }
4330 
4331 static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
4332 {
4333     u64 vruntime = cfs_rq->min_vruntime;
4334 
4335     /*
4336      * The 'current' period is already promised to the current tasks,
4337      * however the extra weight of the new task will slow them down a
4338      * little, place the new task so that it fits in the slot that
4339      * stays open at the end.
4340      */
4341     if (initial && sched_feat(START_DEBIT)) {
4342         vruntime += sched_vslice(cfs_rq, se);
4343     }
4344 
4345     /* sleeps up to a single latency don't count. */
4346     if (!initial) {
4347         unsigned long thresh = sysctl_sched_latency;
4348 
4349         /*
4350          * Halve their sleep time's effect, to allow
4351          * for a gentler effect of sleepers:
4352          */
4353         if (sched_feat(GENTLE_FAIR_SLEEPERS)) {
4354             thresh >>= 1;
4355         }
4356 
4357         vruntime -= thresh;
4358     }
4359 
4360     /* ensure we never gain time by being placed backwards. */
4361     se->vruntime = max_vruntime(se->vruntime, vruntime);
4362 }
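
/*
 * Worked example (illustrative): with min_vruntime = 1000 and a 6ms
 * sysctl_sched_latency, a newly forked task under START_DEBIT starts one
 * vslice after 1000, while a waking task under GENTLE_FAIR_SLEEPERS is
 * placed half a latency's worth of vruntime before 1000; max_vruntime()
 * then ensures no task gains time by being placed before its own current
 * vruntime.
 */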
4363 
4364 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
4365 
4366 static inline void check_schedstat_required(void)
4367 {
4368 #ifdef CONFIG_SCHEDSTATS
4369     if (schedstat_enabled()) {
4370         return;
4371     }
4372 
4373     /* Force schedstat enabled if a dependent tracepoint is active */
4374     if (trace_sched_stat_wait_enabled() || trace_sched_stat_sleep_enabled() || trace_sched_stat_iowait_enabled() ||
4375         trace_sched_stat_blocked_enabled() || trace_sched_stat_runtime_enabled()) {
4376         printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
4377                              "stat_blocked and stat_runtime require the "
4378                              "kernel parameter schedstats=enable or "
4379                              "kernel.sched_schedstats=1\n");
4380     }
4381 #endif
4382 }
4383 
4384 static inline bool cfs_bandwidth_used(void);
4385 
4386 /*
4387  * MIGRATION
4388  *
4389  *    dequeue
4390  *      update_curr()
4391  *        update_min_vruntime()
4392  *      vruntime -= min_vruntime
4393  *
4394  *    enqueue
4395  *      update_curr()
4396  *        update_min_vruntime()
4397  *      vruntime += min_vruntime
4398  *
4399  * this way the vruntime transition between RQs is done when both
4400  * min_vruntime are up-to-date.
4401  *
4402  * WAKEUP (remote)
4403  *
4404  *    ->migrate_task_rq_fair() (p->state == TASK_WAKING)
4405  *      vruntime -= min_vruntime
4406  *
4407  *    enqueue
4408  *      update_curr()
4409  *        update_min_vruntime()
4410  *      vruntime += min_vruntime
4411  *
4412  * this way we don't have the most up-to-date min_vruntime on the originating
4413  * CPU, but we do have an up-to-date min_vruntime on the destination CPU.
4414  */
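
/*
 * Worked example (illustrative): a task with vruntime 1050 leaves a CPU
 * whose min_vruntime is 1000, carrying a relative vruntime of +50. On a
 * destination CPU with min_vruntime 5000 it is enqueued at 5050, keeping
 * its +50 lag rather than starving (at 1050) or being unfairly boosted.
 */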
4415 
4416 static void enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4417 {
4418     bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
4419     bool curr = cfs_rq->curr == se;
4420 
4421     /*
4422      * If we're the current task, we must renormalise before calling
4423      * update_curr().
4424      */
4425     if (renorm && curr) {
4426         se->vruntime += cfs_rq->min_vruntime;
4427     }
4428 
4429     update_curr(cfs_rq);
4430 
4431     /*
4432      * Otherwise, renormalise after, such that we're placed at the current
4433      * moment in time, instead of some random moment in the past. Being
4434      * placed in the past could significantly boost this task to the
4435      * fairness detriment of existing tasks.
4436      */
4437     if (renorm && !curr) {
4438         se->vruntime += cfs_rq->min_vruntime;
4439     }
4440 
4441     /*
4442      * When enqueuing a sched_entity, we must:
4443      *   - Update loads to have both entity and cfs_rq synced with now.
4444      *   - Add its load to cfs_rq->runnable_avg
4445      *   - For group_entity, update its weight to reflect the new share of
4446      *     its group cfs_rq
4447      *   - Add its new weight to cfs_rq->load.weight
4448      */
4449     update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
4450     se_update_runnable(se);
4451     update_cfs_group(se);
4452     account_entity_enqueue(cfs_rq, se);
4453 
4454     if (flags & ENQUEUE_WAKEUP) {
4455         place_entity(cfs_rq, se, 0);
4456     }
4457 
4458     check_schedstat_required();
4459     update_stats_enqueue(cfs_rq, se, flags);
4460     check_spread(cfs_rq, se);
4461     if (!curr) {
4462         fair_enqueue_entity(cfs_rq, se);
4463     }
4464     se->on_rq = 1;
4465 
4466     /*
4467      * When bandwidth control is enabled, cfs might have been removed
4468      * because a parent was throttled while cfs->nr_running > 1. Try to
4469      * add it unconditionally.
4470      */
4471     if (cfs_rq->nr_running == 1 || cfs_bandwidth_used()) {
4472         list_add_leaf_cfs_rq(cfs_rq);
4473     }
4474 
4475     if (cfs_rq->nr_running == 1) {
4476         check_enqueue_throttle(cfs_rq);
4477     }
4478 }
4479 
4480 static void fair_clear_buddies_last(struct sched_entity *se)
4481 {
4482     for_each_sched_entity(se) {
4483         struct cfs_rq *cfs_rq = cfs_rq_of(se);
4484         if (cfs_rq->last != se) {
4485             break;
4486         }
4487 
4488         cfs_rq->last = NULL;
4489     }
4490 }
4491 
4492 static void fair_clear_buddies_next(struct sched_entity *se)
4493 {
4494     for_each_sched_entity(se) {
4495         struct cfs_rq *cfs_rq = cfs_rq_of(se);
4496         if (cfs_rq->next != se) {
4497             break;
4498         }
4499 
4500         cfs_rq->next = NULL;
4501     }
4502 }
4503 
4504 static void fair_clear_buddies_skip(struct sched_entity *se)
4505 {
4506     for_each_sched_entity(se) {
4507         struct cfs_rq *cfs_rq = cfs_rq_of(se);
4508         if (cfs_rq->skip != se) {
4509             break;
4510         }
4511 
4512         cfs_rq->skip = NULL;
4513     }
4514 }
4515 
4516 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
4517 {
4518     if (cfs_rq->last == se) {
4519         fair_clear_buddies_last(se);
4520     }
4521 
4522     if (cfs_rq->next == se) {
4523         fair_clear_buddies_next(se);
4524     }
4525 
4526     if (cfs_rq->skip == se) {
4527         fair_clear_buddies_skip(se);
4528     }
4529 }
4530 
4531 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
4532 
4533 static void dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4534 {
4535     /*
4536      * Update run-time statistics of the 'current'.
4537      */
4538     update_curr(cfs_rq);
4539 
4540     /*
4541      * When dequeuing a sched_entity, we must:
4542      *   - Update loads to have both entity and cfs_rq synced with now.
4543      *   - Subtract its load from the cfs_rq->runnable_avg.
4544      *   - Subtract its previous weight from cfs_rq->load.weight.
4545      *   - For group entity, update its weight to reflect the new share
4546      *     of its group cfs_rq.
4547      */
4548     update_load_avg(cfs_rq, se, UPDATE_TG);
4549     se_update_runnable(se);
4550 
4551     update_stats_dequeue(cfs_rq, se, flags);
4552 
4553     clear_buddies(cfs_rq, se);
4554 
4555     if (se != cfs_rq->curr) {
4556         fair_dequeue_entity(cfs_rq, se);
4557     }
4558     se->on_rq = 0;
4559     account_entity_dequeue(cfs_rq, se);
4560 
4561     /*
4562      * Normalize after update_curr(); which will also have moved
4563      * min_vruntime if @se is the one holding it back. But before doing
4564      * update_min_vruntime() again, which will discount @se's position and
4565      * can move min_vruntime forward still more.
4566      */
4567     if (!(flags & DEQUEUE_SLEEP)) {
4568         se->vruntime -= cfs_rq->min_vruntime;
4569     }
4570 
4571     /* return excess runtime on last dequeue */
4572     return_cfs_rq_runtime(cfs_rq);
4573 
4574     update_cfs_group(se);
4575 
4576     /*
4577      * Now advance min_vruntime if @se was the entity holding it back,
4578      * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
4579      * put back on, and if we advance min_vruntime, we'll be placed back
4580      * further than we started -- ie. we'll be penalized.
4581      */
4582     if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE) {
4583         update_min_vruntime(cfs_rq);
4584     }
4585 }
4586 
4587 /*
4588  * Preempt the current task with a newly woken task if needed:
4589  */
4590 static void check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
4591 {
4592     unsigned long ideal_runtime, delta_exec;
4593     struct sched_entity *se;
4594     s64 delta;
4595 
4596     ideal_runtime = sched_slice(cfs_rq, curr);
4597     delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
4598     if (delta_exec > ideal_runtime) {
4599         resched_curr(rq_of(cfs_rq));
4600         /*
4601          * The current task ran long enough, ensure it doesn't get
4602          * re-elected due to buddy favours.
4603          */
4604         clear_buddies(cfs_rq, curr);
4605         return;
4606     }
4607 
4608     /*
4609      * Ensure that a task that missed wakeup preemption by a
4610      * narrow margin doesn't have to wait for a full slice.
4611      * This also mitigates buddy induced latencies under load.
4612      */
4613     if (delta_exec < (unsigned long)sysctl_sched_min_granularity) {
4614         return;
4615     }
4616 
4617     se = __pick_first_entity(cfs_rq);
4618     delta = curr->vruntime - se->vruntime;
4619 
4620     if (delta < 0) {
4621         return;
4622     }
4623 
4624     if (delta > ideal_runtime) {
4625         resched_curr(rq_of(cfs_rq));
4626     }
4627 }
4628 
4629 static void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
4630 {
4631     /* 'current' is not kept within the tree. */
4632     if (se->on_rq) {
4633         /*
4634          * Any task has to be enqueued before it gets to execute on
4635          * a CPU. So account for the time it spent waiting on the
4636          * runqueue.
4637          */
4638         update_stats_wait_end(cfs_rq, se);
4639         fair_dequeue_entity(cfs_rq, se);
4640         update_load_avg(cfs_rq, se, UPDATE_TG);
4641     }
4642 
4643     update_stats_curr_start(cfs_rq, se);
4644     cfs_rq->curr = se;
4645 
4646     /*
4647      * Track our maximum slice length, if the CPU's load is at
4648      * least twice that of our own weight (i.e. don't track it
4649      * when there are only lesser-weight tasks around):
4650      */
4651     if (schedstat_enabled() && rq_of(cfs_rq)->cfs.load.weight >= 0x2 * se->load.weight) {
4652         schedstat_set(se->statistics.slice_max, max((u64)schedstat_val(se->statistics.slice_max),
4653                                                     se->sum_exec_runtime - se->prev_sum_exec_runtime));
4654     }
4655 
4656     se->prev_sum_exec_runtime = se->sum_exec_runtime;
4657 }
4658 
4659 static int wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
4660 
4661 /*
4662  * Pick the next process, keeping these things in mind, in this order:
4663  * 1) keep things fair between processes/task groups
4664  * 2) pick the "next" process, since someone really wants that to run
4665  * 3) pick the "last" process, for cache locality
4666  * 4) do not run the "skip" process, if something else is available
4667  */
4668 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
4669 {
4670     struct sched_entity *left = __pick_first_entity(cfs_rq);
4671     struct sched_entity *se;
4672 
4673     /*
4674      * If curr is set we have to see if it's left of the leftmost entity
4675      * still in the tree, provided there was anything in the tree at all.
4676      */
4677     if (!left || (curr && entity_before(curr, left))) {
4678         left = curr;
4679     }
4680 
4681     se = left; /* ideally we run the leftmost entity */
4682 
4683     /*
4684      * Avoid running the skip buddy, if running something else can
4685      * be done without getting too unfair.
4686      */
4687     if (cfs_rq->skip == se) {
4688         struct sched_entity *second;
4689 
4690         if (se == curr) {
4691             second = __pick_first_entity(cfs_rq);
4692         } else {
4693             second = fair_pick_next_entity(se);
4694             if (!second || (curr && entity_before(curr, second))) {
4695                 second = curr;
4696             }
4697         }
4698 
4699         if (second && wakeup_preempt_entity(second, left) < 1) {
4700             se = second;
4701         }
4702     }
4703 
4704     if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) {
4705         /*
4706          * Someone really wants this to run. If it's not unfair, run it.
4707          */
4708         se = cfs_rq->next;
4709     } else if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) {
4710         /*
4711          * Prefer last buddy, try to return the CPU to a preempted task.
4712          */
4713         se = cfs_rq->last;
4714     }
4715 
4716     clear_buddies(cfs_rq, se);
4717 
4718     return se;
4719 }
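
/*
 * Example of the buddy ordering above (hypothetical scenario): a task
 * that calls sched_yield() becomes the skip buddy. If it is still the
 * leftmost entity, the picker falls back to the second entity in the
 * tree, but only when wakeup_preempt_entity() judges that running it
 * would not be too unfair to the leftmost. A next buddy set at wakeup
 * then takes precedence over the last (preempted) buddy.
 */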
4720 
4721 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
4722 
4723 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
4724 {
4725     /*
4726      * If still on the runqueue then deactivate_task()
4727      * was not called and update_curr() has to be done:
4728      */
4729     if (prev->on_rq) {
4730         update_curr(cfs_rq);
4731     }
4732 
4733     /* throttle cfs_rqs exceeding runtime */
4734     check_cfs_rq_runtime(cfs_rq);
4735 
4736     check_spread(cfs_rq, prev);
4737 
4738     if (prev->on_rq) {
4739         update_stats_wait_start(cfs_rq, prev);
4740         /* Put 'current' back into the tree. */
4741         fair_enqueue_entity(cfs_rq, prev);
4742         /* in !on_rq case, update occurred at dequeue */
4743         update_load_avg(cfs_rq, prev, 0);
4744     }
4745     cfs_rq->curr = NULL;
4746 }
4747 
4748 static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
4749 {
4750     /*
4751      * Update run-time statistics of the 'current'.
4752      */
4753     update_curr(cfs_rq);
4754 
4755     /*
4756      * Ensure that runnable average is periodically updated.
4757      */
4758     update_load_avg(cfs_rq, curr, UPDATE_TG);
4759     update_cfs_group(curr);
4760 
4761 #ifdef CONFIG_SCHED_HRTICK
4762     /*
4763      * queued ticks are scheduled to match the slice, so don't bother
4764      * validating it and just reschedule.
4765      */
4766     if (queued) {
4767         resched_curr(rq_of(cfs_rq));
4768         return;
4769     }
4770     /*
4771      * don't let the period tick interfere with the hrtick preemption
4772      */
4773     if (!sched_feat(DOUBLE_TICK) && hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) {
4774         return;
4775     }
4776 #endif
4777 
4778     if (cfs_rq->nr_running > 1) {
4779         check_preempt_tick(cfs_rq, curr);
4780     }
4781 }
4782 
4783 /**************************************************
4784  * CFS bandwidth control machinery
4785  */
4786 
4787 #ifdef CONFIG_CFS_BANDWIDTH
4788 
4789 #ifdef CONFIG_JUMP_LABEL
4790 static struct static_key fair_cfs_bandwidth_used;
4791 
4792 static inline bool cfs_bandwidth_used(void)
4793 {
4794     return static_key_false(&fair_cfs_bandwidth_used);
4795 }
4796 
4797 void cfs_bandwidth_usage_inc(void)
4798 {
4799     static_key_slow_inc_cpuslocked(&fair_cfs_bandwidth_used);
4800 }
4801 
4802 void cfs_bandwidth_usage_dec(void)
4803 {
4804     static_key_slow_dec_cpuslocked(&fair_cfs_bandwidth_used);
4805 }
4806 #else  /* CONFIG_JUMP_LABEL */
4807 static bool cfs_bandwidth_used(void)
4808 {
4809     return true;
4810 }
4811 
4812 void cfs_bandwidth_usage_inc(void)
4813 {
4814 }
4815 void cfs_bandwidth_usage_dec(void)
4816 {
4817 }
4818 #endif /* CONFIG_JUMP_LABEL */
4819 
4820 /*
4821  * default period for cfs group bandwidth.
4822  * default: 0.1s, units: nanoseconds
4823  */
4824 static inline u64 default_cfs_period(void)
4825 {
4826     return 100000000ULL;
4827 }
4828 
4829 static inline u64 sched_cfs_bandwidth_slice(void)
4830 {
4831     return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
4832 }
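
/*
 * Example configuration (hypothetical values): a group capped at half a
 * CPU is typically set up through the cpu cgroup interface, e.g.
 *
 *   echo 100000 > cpu.cfs_period_us   // period = 100ms, the default
 *   echo  50000 > cpu.cfs_quota_us    // quota  = 50ms per period
 *
 * Each cfs_rq then pulls runtime from the group-wide pool in slices of
 * sysctl_sched_cfs_bandwidth_slice (5ms by default) rather than a whole
 * period's worth at once, limiting how much quota an idle CPU can strand
 * locally.
 */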
4833 
4834 /*
4835  * Replenish runtime according to assigned quota. We use sched_clock_cpu
4836  * directly instead of rq->clock to avoid adding additional synchronization
4837  * around rq->lock.
4838  *
4839  * requires cfs_b->lock
4840  */
4841 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
4842 {
4843     if (cfs_b->quota != RUNTIME_INF) {
4844         cfs_b->runtime = cfs_b->quota;
4845     }
4846 }
4847 
4848 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4849 {
4850     return &tg->cfs_bandwidth;
4851 }
4852 
4853 /* returns 0 on failure to allocate runtime */
4854 static int fair_assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, struct cfs_rq *cfs_rq, u64 target_runtime)
4855 {
4856     u64 min_amount, amount = 0;
4857 
4858     lockdep_assert_held(&cfs_b->lock);
4859 
4860     /* note: this is a positive sum as runtime_remaining <= 0 */
4861     min_amount = target_runtime - cfs_rq->runtime_remaining;
4862 
4863     if (cfs_b->quota == RUNTIME_INF) {
4864         amount = min_amount;
4865     } else {
4866         start_cfs_bandwidth(cfs_b);
4867 
4868         if (cfs_b->runtime > 0) {
4869             amount = min(cfs_b->runtime, min_amount);
4870             cfs_b->runtime -= amount;
4871             cfs_b->idle = 0;
4872         }
4873     }
4874 
4875     cfs_rq->runtime_remaining += amount;
4876 
4877     return cfs_rq->runtime_remaining > 0;
4878 }
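
/*
 * Worked example (hypothetical numbers): a cfs_rq arrives with
 * runtime_remaining = -2ms (a deficit) and asks for one 5ms slice, so
 * min_amount = 5ms - (-2ms) = 7ms. If the global pool only holds 4ms,
 * amount = min(4ms, 7ms) = 4ms and runtime_remaining becomes +2ms: the
 * allocation succeeds (> 0) even though the full request could not be
 * granted.
 */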
4879 
4880 /* returns 0 on failure to allocate runtime */
4881 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4882 {
4883     struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4884     int ret;
4885 
4886     raw_spin_lock(&cfs_b->lock);
4887     ret = fair_assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice());
4888     raw_spin_unlock(&cfs_b->lock);
4889 
4890     return ret;
4891 }
4892 
4893 static void fair_account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
4894 {
4895     /* dock delta_exec before expiring quota (as it could span periods) */
4896     cfs_rq->runtime_remaining -= delta_exec;
4897 
4898     if (likely(cfs_rq->runtime_remaining > 0)) {
4899         return;
4900     }
4901 
4902     if (cfs_rq->throttled) {
4903         return;
4904     }
4905     /*
4906      * if we're unable to extend our runtime we resched so that the active
4907      * hierarchy can be throttled
4908      */
4909     if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) {
4910         resched_curr(rq_of(cfs_rq));
4911     }
4912 }
4913 
4914 static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
4915 {
4916     if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) {
4917         return;
4918     }
4919 
4920     fair_account_cfs_rq_runtime(cfs_rq, delta_exec);
4921 }
4922 
4923 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4924 {
4925     return cfs_bandwidth_used() && cfs_rq->throttled;
4926 }
4927 
4928 /* check whether cfs_rq, or any parent, is throttled */
4929 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4930 {
4931     return cfs_bandwidth_used() && cfs_rq->throttle_count;
4932 }
4933 
4934 /*
4935  * Ensure that neither of the group entities corresponding to src_cpu or
4936  * dest_cpu are members of a throttled hierarchy when performing group
4937  * load-balance operations.
4938  */
4939 static inline int throttled_lb_pair(struct task_group *tg, int src_cpu, int dest_cpu)
4940 {
4941     struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
4942 
4943     src_cfs_rq = tg->cfs_rq[src_cpu];
4944     dest_cfs_rq = tg->cfs_rq[dest_cpu];
4945 
4946     return throttled_hierarchy(src_cfs_rq) || throttled_hierarchy(dest_cfs_rq);
4947 }
4948 
4949 static int tg_unthrottle_up(struct task_group *tg, void *data)
4950 {
4951     struct rq *rq = data;
4952     struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4953 
4954     cfs_rq->throttle_count--;
4955     if (!cfs_rq->throttle_count) {
4956         cfs_rq->throttled_clock_pelt_time += rq_clock_task(rq) - cfs_rq->throttled_clock_pelt;
4957 
4958         /* Add cfs_rq with already running entity in the list */
4959         if (cfs_rq->nr_running >= 1) {
4960             list_add_leaf_cfs_rq(cfs_rq);
4961         }
4962     }
4963 
4964     return 0;
4965 }
4966 
4967 static int tg_throttle_down(struct task_group *tg, void *data)
4968 {
4969     struct rq *rq = data;
4970     struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4971 
4972     /* group is entering throttled state, stop time */
4973     if (!cfs_rq->throttle_count) {
4974         cfs_rq->throttled_clock_pelt = rq_clock_task(rq);
4975         list_del_leaf_cfs_rq(cfs_rq);
4976     }
4977     cfs_rq->throttle_count++;
4978 
4979     return 0;
4980 }
4981 
4982 static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
4983 {
4984     struct rq *rq = rq_of(cfs_rq);
4985     struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4986     struct sched_entity *se;
4987     long task_delta, idle_task_delta, dequeue = 1;
4988 
4989     raw_spin_lock(&cfs_b->lock);
4990     /* This will start the period timer if necessary */
4991     if (fair_assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) {
4992         /*
4993          * We have raced with bandwidth becoming available, and if we
4994          * actually throttled the timer might not unthrottle us for an
4995          * entire period. We additionally needed to make sure that any
4996          * subsequent check_cfs_rq_runtime calls agree not to throttle
4997          * us, as we may commit to do cfs put_prev+pick_next, so we ask
4998          * for 1ns of runtime rather than just check cfs_b.
4999          */
5000         dequeue = 0;
5001     } else {
5002         list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
5003     }
5004     raw_spin_unlock(&cfs_b->lock);
5005 
5006     if (!dequeue) {
5007         return false; /* Throttle no longer required. */
5008     }
5009 
5010     se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
5011 
5012     /* freeze hierarchy runnable averages while throttled */
5013     rcu_read_lock();
5014     walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
5015     rcu_read_unlock();
5016 
5017     task_delta = cfs_rq->h_nr_running;
5018     idle_task_delta = cfs_rq->idle_h_nr_running;
5019     for_each_sched_entity(se) {
5020         struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5021         /* throttled entity or throttle-on-deactivate */
5022         if (!se->on_rq) {
5023             break;
5024         }
5025 
5026         if (dequeue) {
5027             dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
5028         } else {
5029             update_load_avg(qcfs_rq, se, 0);
5030             se_update_runnable(se);
5031         }
5032 
5033         qcfs_rq->h_nr_running -= task_delta;
5034         qcfs_rq->idle_h_nr_running -= idle_task_delta;
5035         walt_dec_throttled_cfs_rq_stats(&qcfs_rq->walt_stats, cfs_rq);
5036 
5037         if (qcfs_rq->load.weight) {
5038             dequeue = 0;
5039         }
5040     }
5041 
5042     if (!se) {
5043         sub_nr_running(rq, task_delta);
5044         walt_dec_throttled_cfs_rq_stats(&rq->walt_stats, cfs_rq);
5045     }
5046 
5047     /*
5048      * Note: distribution will already see us throttled via the
5049      * throttled-list.  rq->lock protects completion.
5050      */
5051     cfs_rq->throttled = 1;
5052     cfs_rq->throttled_clock = rq_clock(rq);
5053     return true;
5054 }
5055 
5056 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
5057 {
5058     struct rq *rq = rq_of(cfs_rq);
5059     struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5060     struct sched_entity *se;
5061     long task_delta, idle_task_delta;
5062     struct cfs_rq *tcfs_rq __maybe_unused = cfs_rq;
5063 
5064     se = cfs_rq->tg->se[cpu_of(rq)];
5065 
5066     cfs_rq->throttled = 0;
5067 
5068     update_rq_clock(rq);
5069 
5070     raw_spin_lock(&cfs_b->lock);
5071     cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
5072     list_del_rcu(&cfs_rq->throttled_list);
5073     raw_spin_unlock(&cfs_b->lock);
5074 
5075     /* update hierarchical throttle state */
5076     walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
5077 
5078     if (!cfs_rq->load.weight) {
5079         return;
5080     }
5081 
5082     task_delta = cfs_rq->h_nr_running;
5083     idle_task_delta = cfs_rq->idle_h_nr_running;
5084     for_each_sched_entity(se) {
5085         if (se->on_rq) {
5086             break;
5087         }
5088         cfs_rq = cfs_rq_of(se);
5089         enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
5090 
5091         cfs_rq->h_nr_running += task_delta;
5092         cfs_rq->idle_h_nr_running += idle_task_delta;
5093         walt_inc_throttled_cfs_rq_stats(&cfs_rq->walt_stats, tcfs_rq);
5094 
5095         /* end evaluation on encountering a throttled cfs_rq */
5096         if (cfs_rq_throttled(cfs_rq)) {
5097             goto unthrottle_throttle;
5098         }
5099     }
5100 
5101     for_each_sched_entity(se) {
5102         cfs_rq = cfs_rq_of(se);
5103 
5104         update_load_avg(cfs_rq, se, UPDATE_TG);
5105         se_update_runnable(se);
5106 
5107         cfs_rq->h_nr_running += task_delta;
5108         cfs_rq->idle_h_nr_running += idle_task_delta;
5109         walt_inc_throttled_cfs_rq_stats(&cfs_rq->walt_stats, tcfs_rq);
5110 
5111         /* end evaluation on encountering a throttled cfs_rq */
5112         if (cfs_rq_throttled(cfs_rq)) {
5113             goto unthrottle_throttle;
5114         }
5115 
5116         /*
5117          * One parent has been throttled and cfs_rq removed from the
5118          * list. Add it back to not break the leaf list.
5119          */
5120         if (throttled_hierarchy(cfs_rq)) {
5121             list_add_leaf_cfs_rq(cfs_rq);
5122         }
5123     }
5124 
5125     /* At this point se is NULL and we are at root level */
5126     add_nr_running(rq, task_delta);
5127     walt_inc_throttled_cfs_rq_stats(&rq->walt_stats, tcfs_rq);
5128 
5129 unthrottle_throttle:
5130     /*
5131      * The cfs_rq_throttled() breaks in the above iteration can result in
5132      * incomplete leaf list maintenance, resulting in triggering the
5133      * assertion below.
5134      */
5135     for_each_sched_entity(se) {
5136         cfs_rq = cfs_rq_of(se);
5137         if (list_add_leaf_cfs_rq(cfs_rq)) {
5138             break;
5139         }
5140     }
5141 
5142     assert_list_leaf_cfs_rq(rq);
5143 
5144     /* Determine whether we need to wake up potentially idle CPU: */
5145     if (rq->curr == rq->idle && rq->cfs.nr_running) {
5146         resched_curr(rq);
5147     }
5148 }
5149 
5150 static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
5151 {
5152     struct cfs_rq *cfs_rq;
5153     u64 runtime, remaining = 1;
5154 
5155     rcu_read_lock();
5156     list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, throttled_list)
5157     {
5158         struct rq *rq = rq_of(cfs_rq);
5159         struct rq_flags rf;
5160 
5161         rq_lock_irqsave(rq, &rf);
5162         if (!cfs_rq_throttled(cfs_rq)) {
5163             goto next;
5164         }
5165 
5166         /* By the above check, this should never be true */
5167         SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
5168 
5169         raw_spin_lock(&cfs_b->lock);
5170         runtime = -cfs_rq->runtime_remaining + 1;
5171         if (runtime > cfs_b->runtime) {
5172             runtime = cfs_b->runtime;
5173         }
5174         cfs_b->runtime -= runtime;
5175         remaining = cfs_b->runtime;
5176         raw_spin_unlock(&cfs_b->lock);
5177 
5178         cfs_rq->runtime_remaining += runtime;
5179 
5180         /* we check whether we're throttled above */
5181         if (cfs_rq->runtime_remaining > 0) {
5182             unthrottle_cfs_rq(cfs_rq);
5183         }
5184 
5185     next:
5186         rq_unlock_irqrestore(rq, &rf);
5187 
5188         if (!remaining) {
5189             break;
5190         }
5191     }
5192     rcu_read_unlock();
5193 }
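
/*
 * Worked example (hypothetical numbers): a throttled cfs_rq with
 * runtime_remaining = -3ms is visited while the pool holds 10ms. It is
 * handed 3ms + 1ns, leaving runtime_remaining at exactly +1ns - just
 * enough to unthrottle; a full slice is requested again from
 * account_cfs_rq_runtime() once the queue actually runs. The walk stops
 * early if the pool is drained (remaining == 0).
 */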
5194 
5195 /*
5196  * Responsible for refilling a task_group's bandwidth and unthrottling its
5197  * cfs_rqs as appropriate. If there has been no activity within the last
5198  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
5199  * used to track this state.
5200  */
5201 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
5202 {
5203     int throttled;
5204 
5205     /* no need to continue the timer with no bandwidth constraint */
5206     if (cfs_b->quota == RUNTIME_INF) {
5207         goto out_deactivate;
5208     }
5209 
5210     throttled = !list_empty(&cfs_b->throttled_cfs_rq);
5211     cfs_b->nr_periods += overrun;
5212 
5213     /*
5214      * idle depends on !throttled (for the case of a large deficit), and if
5215      * we're going inactive then everything else can be deferred
5216      */
5217     if (cfs_b->idle && !throttled) {
5218         goto out_deactivate;
5219     }
5220 
5221     __refill_cfs_bandwidth_runtime(cfs_b);
5222 
5223     if (!throttled) {
5224         /* mark as potentially idle for the upcoming period */
5225         cfs_b->idle = 1;
5226         return 0;
5227     }
5228 
5229     /* account preceding periods in which throttling occurred */
5230     cfs_b->nr_throttled += overrun;
5231 
5232     /*
5233      * This check is repeated as we release cfs_b->lock while we unthrottle.
5234      */
5235     while (throttled && cfs_b->runtime > 0) {
5236         raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5237         /* we can't nest cfs_b->lock while distributing bandwidth */
5238         distribute_cfs_runtime(cfs_b);
5239         raw_spin_lock_irqsave(&cfs_b->lock, flags);
5240 
5241         throttled = !list_empty(&cfs_b->throttled_cfs_rq);
5242     }
5243 
5244     /*
5245      * While we are ensured activity in the period following an
5246      * unthrottle, this also covers the case in which the new bandwidth is
5247      * insufficient to cover the existing bandwidth deficit.  (Forcing the
5248      * timer to remain active while there are any throttled entities.)
5249      */
5250     cfs_b->idle = 0;
5251 
5252     return 0;
5253 
5254 out_deactivate:
5255     return 1;
5256 }
5257 
5258 /* a cfs_rq won't donate quota below this amount */
5259 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
5260 /* minimum remaining period time to redistribute slack quota */
5261 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
5262 /* how long we wait to gather additional slack before distributing */
5263 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
5264 
5265 /*
5266  * Are we near the end of the current quota period?
5267  *
5268  * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
5269  * hrtimer base being cleared by hrtimer_start. In the case of
5270  * migrate_hrtimers, base is never cleared, so we are fine.
5271  */
5272 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
5273 {
5274     struct hrtimer *refresh_timer = &cfs_b->period_timer;
5275     s64 remaining;
5276 
5277     /* if the call-back is running a quota refresh is already occurring */
5278     if (hrtimer_callback_running(refresh_timer)) {
5279         return 1;
5280     }
5281 
5282     /* is a quota refresh about to occur? */
5283     remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
5284     if (remaining < (s64)min_expire) {
5285         return 1;
5286     }
5287 
5288     return 0;
5289 }
5290 
5291 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
5292 {
5293     u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
5294 
5295     /* if there's a quota refresh soon don't bother with slack */
5296     if (runtime_refresh_within(cfs_b, min_left)) {
5297         return;
5298     }
5299 
5300     /* don't push forwards an existing deferred unthrottle */
5301     if (cfs_b->slack_started) {
5302         return;
5303     }
5304     cfs_b->slack_started = true;
5305 
5306     hrtimer_start(&cfs_b->slack_timer, ns_to_ktime(cfs_bandwidth_slack_period), HRTIMER_MODE_REL);
5307 }
5308 
5309 /* we know any runtime found here is valid as update_curr() precedes return */
5310 static void fair_return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5311 {
5312     struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5313     s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
5314 
5315     if (slack_runtime <= 0) {
5316         return;
5317     }
5318 
5319     raw_spin_lock(&cfs_b->lock);
5320     if (cfs_b->quota != RUNTIME_INF) {
5321         cfs_b->runtime += slack_runtime;
5322 
5323         /* we are under rq->lock, defer unthrottling using a timer */
5324         if (cfs_b->runtime > sched_cfs_bandwidth_slice() && !list_empty(&cfs_b->throttled_cfs_rq)) {
5325             start_cfs_slack_bandwidth(cfs_b);
5326         }
5327     }
5328     raw_spin_unlock(&cfs_b->lock);
5329 
5330     /* even if it's not valid for return we don't want to try again */
5331     cfs_rq->runtime_remaining -= slack_runtime;
5332 }
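
/*
 * Worked example (hypothetical numbers): the last task leaves a cfs_rq
 * that still holds runtime_remaining = 3ms. With min_cfs_rq_runtime at
 * 1ms, slack_runtime = 2ms goes back to the global pool and 1ms stays
 * local. If the pool now exceeds one bandwidth slice and throttled
 * cfs_rqs exist, the 5ms slack timer is armed - unless a quota refresh
 * would occur within the slack period plus min_bandwidth_expiration
 * (5ms + 2ms), in which case the refresh handles the unthrottling.
 */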
5333 
5334 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5335 {
5336     if (!cfs_bandwidth_used()) {
5337         return;
5338     }
5339 
5340     if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) {
5341         return;
5342     }
5343 
5344     fair_return_cfs_rq_runtime(cfs_rq);
5345 }
5346 
5347 /*
5348  * This is done with a timer (instead of inline with bandwidth return) since
5349  * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
5350  */
5351 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
5352 {
5353     u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
5354     unsigned long flags;
5355 
5356     /* confirm we're still not at a refresh boundary */
5357     raw_spin_lock_irqsave(&cfs_b->lock, flags);
5358     cfs_b->slack_started = false;
5359 
5360     if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
5361         raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5362         return;
5363     }
5364 
5365     if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
5366         runtime = cfs_b->runtime;
5367     }
5368 
5369     raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5370 
5371     if (!runtime) {
5372         return;
5373     }
5374 
5375     distribute_cfs_runtime(cfs_b);
5376 
5377     raw_spin_lock_irqsave(&cfs_b->lock, flags);
5378     raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5379 }
5380 
5381 /*
5382  * When a group wakes up we want to make sure that its quota is not already
5383  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
5384  * runtime as update_curr() throttling cannot trigger until it's on-rq.
5385  */
5386 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
5387 {
5388     if (!cfs_bandwidth_used()) {
5389         return;
5390     }
5391 
5392     /* an active group must be handled by the update_curr()->put() path */
5393     if (!cfs_rq->runtime_enabled || cfs_rq->curr) {
5394         return;
5395     }
5396 
5397     /* ensure the group is not already throttled */
5398     if (cfs_rq_throttled(cfs_rq)) {
5399         return;
5400     }
5401 
5402     /* update runtime allocation */
5403     account_cfs_rq_runtime(cfs_rq, 0);
5404     if (cfs_rq->runtime_remaining <= 0) {
5405         throttle_cfs_rq(cfs_rq);
5406     }
5407 }
5408 
5409 static void sync_throttle(struct task_group *tg, int cpu)
5410 {
5411     struct cfs_rq *pcfs_rq, *cfs_rq;
5412 
5413     if (!cfs_bandwidth_used()) {
5414         return;
5415     }
5416 
5417     if (!tg->parent) {
5418         return;
5419     }
5420 
5421     cfs_rq = tg->cfs_rq[cpu];
5422     pcfs_rq = tg->parent->cfs_rq[cpu];
5423 
5424     cfs_rq->throttle_count = pcfs_rq->throttle_count;
5425     cfs_rq->throttled_clock_pelt = rq_clock_task(cpu_rq(cpu));
5426 }
5427 
5428 /* conditionally throttle active cfs_rq's from put_prev_entity() */
5429 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5430 {
5431     if (!cfs_bandwidth_used()) {
5432         return false;
5433     }
5434 
5435     if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) {
5436         return false;
5437     }
5438 
5439     /*
5440      * it's possible for a throttled entity to be forced into a running
5441      * state (e.g. set_curr_task), in this case we're finished.
5442      */
5443     if (cfs_rq_throttled(cfs_rq)) {
5444         return true;
5445     }
5446 
5447     return throttle_cfs_rq(cfs_rq);
5448 }
5449 
5450 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
5451 {
5452     struct cfs_bandwidth *cfs_b = container_of(timer, struct cfs_bandwidth, slack_timer);
5453 
5454     do_sched_cfs_slack_timer(cfs_b);
5455 
5456     return HRTIMER_NORESTART;
5457 }
5458 
5459 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
5460 {
5461     struct cfs_bandwidth *cfs_b = container_of(timer, struct cfs_bandwidth, period_timer);
5462     unsigned long flags;
5463     int overrun;
5464     int idle = 0;
5465     int count = 0;
5466 
5467     raw_spin_lock_irqsave(&cfs_b->lock, flags);
5468     for (;;) {
5469         overrun = hrtimer_forward_now(timer, cfs_b->period);
5470         if (!overrun) {
5471             break;
5472         }
5473 
5474         idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
5475 
5476         if (++count > 0x3) {
5477             u64 new, old = ktime_to_ns(cfs_b->period);
5478 
5479             /*
5480              * Grow period by a factor of 2 to avoid losing precision.
5481              * Precision loss in the quota/period ratio can cause __cfs_schedulable
5482              * to fail.
5483              */
5484             new = old * 0x2;
5485             if (new < max_cfs_quota_period) {
5486                 cfs_b->period = ns_to_ktime(new);
5487                 cfs_b->quota *= 0x2;
5488 
5489                 pr_warn_ratelimited("cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, "
5490                                     "cfs_quota_us = %lld)\n",
5491                                     smp_processor_id(), div_u64(new, NSEC_PER_USEC),
5492                                     div_u64(cfs_b->quota, NSEC_PER_USEC));
5493             } else {
5494                 pr_warn_ratelimited("cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing "
5495                                     "precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
5496                                     smp_processor_id(), div_u64(old, NSEC_PER_USEC),
5497                                     div_u64(cfs_b->quota, NSEC_PER_USEC));
5498             }
5499 
5500             /* reset count so we don't come right back in here */
5501             count = 0;
5502         }
5503     }
5504     if (idle) {
5505         cfs_b->period_active = 0;
5506     }
5507     raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5508 
5509     return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
5510 }
5511 
5512 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
5513 {
5514     raw_spin_lock_init(&cfs_b->lock);
5515     cfs_b->runtime = 0;
5516     cfs_b->quota = RUNTIME_INF;
5517     cfs_b->period = ns_to_ktime(default_cfs_period());
5518 
5519     INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
5520     hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
5521     cfs_b->period_timer.function = sched_cfs_period_timer;
5522     hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5523     cfs_b->slack_timer.function = sched_cfs_slack_timer;
5524     cfs_b->slack_started = false;
5525 }
5526 
5527 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5528 {
5529     cfs_rq->runtime_enabled = 0;
5530     INIT_LIST_HEAD(&cfs_rq->throttled_list);
5531     walt_init_cfs_rq_stats(cfs_rq);
5532 }
5533 
5534 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
5535 {
5536     lockdep_assert_held(&cfs_b->lock);
5537 
5538     if (cfs_b->period_active) {
5539         return;
5540     }
5541 
5542     cfs_b->period_active = 1;
5543     hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
5544     hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
5545 }
5546 
5547 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
5548 {
5549     /* init_cfs_bandwidth() was not called */
5550     if (!cfs_b->throttled_cfs_rq.next) {
5551         return;
5552     }
5553 
5554     hrtimer_cancel(&cfs_b->period_timer);
5555     hrtimer_cancel(&cfs_b->slack_timer);
5556 }
5557 
5558 /*
5559  * Both these CPU hotplug callbacks race against unregister_fair_sched_group()
5560  *
5561  * The race is harmless, since modifying bandwidth settings of unhooked group
5562  * bits doesn't do much.
5563  */
5564 
5565 /* cpu online callback */
5566 static void __maybe_unused update_runtime_enabled(struct rq *rq)
5567 {
5568     struct task_group *tg;
5569 
5570     lockdep_assert_held(&rq->lock);
5571 
5572     rcu_read_lock();
5573     list_for_each_entry_rcu(tg, &task_groups, list)
5574     {
5575         struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
5576         struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5577 
5578         raw_spin_lock(&cfs_b->lock);
5579         cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
5580         raw_spin_unlock(&cfs_b->lock);
5581     }
5582     rcu_read_unlock();
5583 }
5584 
5585 /* cpu offline callback */
5586 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
5587 {
5588     struct task_group *tg;
5589 
5590     lockdep_assert_held(&rq->lock);
5591 
5592     rcu_read_lock();
5593     list_for_each_entry_rcu(tg, &task_groups, list)
5594     {
5595         struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5596 
5597         if (!cfs_rq->runtime_enabled) {
5598             continue;
5599         }
5600 
5601         /*
5602          * clock_task is not advancing so we just need to make sure
5603          * there's some valid quota amount
5604          */
5605         cfs_rq->runtime_remaining = 1;
5606         /*
5607          * Offline rq is schedulable till CPU is completely disabled
5608          * in take_cpu_down(), so we prevent new cfs throttling here.
5609          */
5610         cfs_rq->runtime_enabled = 0;
5611 
5612         if (cfs_rq_throttled(cfs_rq)) {
5613             unthrottle_cfs_rq(cfs_rq);
5614         }
5615     }
5616     rcu_read_unlock();
5617 }
5618 
5619 #else /* CONFIG_CFS_BANDWIDTH */
5620 
5621 static inline bool cfs_bandwidth_used(void)
5622 {
5623     return false;
5624 }
5625 
5626 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
5627 {
5628 }
5629 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5630 {
5631     return false;
5632 }
5633 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
5634 {
5635 }
5636 static inline void sync_throttle(struct task_group *tg, int cpu)
5637 {
5638 }
5639 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5640 {
5641 }
5642 
5643 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
5644 {
5645     return 0;
5646 }
5647 
5648 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
5649 {
5650     return 0;
5651 }
5652 
5653 static inline int throttled_lb_pair(struct task_group *tg, int src_cpu, int dest_cpu)
5654 {
5655     return 0;
5656 }
5657 
5658 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
5659 {
5660 }
5661 
5662 #ifdef CONFIG_FAIR_GROUP_SCHED
5663 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5664 {
5665 }
5666 #endif
5667 
5668 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
5669 {
5670     return NULL;
5671 }
5672 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
5673 {
5674 }
5675 static inline void update_runtime_enabled(struct rq *rq)
5676 {
5677 }
5678 static inline void unthrottle_offline_cfs_rqs(struct rq *rq)
5679 {
5680 }
5681 
5682 #endif /* CONFIG_CFS_BANDWIDTH */
5683 
5684 /**************************************************
5685  * CFS operations on tasks:
5686  */
5687 
5688 #ifdef CONFIG_SCHED_HRTICK
5689 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
5690 {
5691     struct sched_entity *se = &p->se;
5692     struct cfs_rq *cfs_rq = cfs_rq_of(se);
5693 
5694     SCHED_WARN_ON(task_rq(p) != rq);
5695 
5696     if (rq->cfs.h_nr_running > 1) {
5697         u64 slice = sched_slice(cfs_rq, se);
5698         u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
5699         s64 delta = slice - ran;
5700 
5701         if (delta < 0) {
5702             if (rq->curr == p) {
5703                 resched_curr(rq);
5704             }
5705             return;
5706         }
5707         hrtick_start(rq, delta);
5708     }
5709 }
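
/*
 * Worked example (hypothetical numbers): the current fair task has a 4ms
 * slice from sched_slice() and has consumed 1.5ms of it, so delta =
 * 2.5ms and the hrtimer is programmed to fire 2.5ms from now, preempting
 * exactly at the slice boundary instead of at the next periodic tick.
 * Had the slice already been overrun (delta < 0), the task would be
 * rescheduled immediately.
 */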
5710 
5711 /*
5712  * called from enqueue/dequeue and updates the hrtick when the
5713  * current task is from our class and nr_running is low enough
5714  * to matter.
5715  */
5716 static void hrtick_update(struct rq *rq)
5717 {
5718     struct task_struct *curr = rq->curr;
5719 
5720     if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) {
5721         return;
5722     }
5723 
5724     if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) {
5725         hrtick_start_fair(rq, curr);
5726     }
5727 }
5728 #else /* !CONFIG_SCHED_HRTICK */
5729 static inline void hrtick_start_fair(struct rq *rq, struct task_struct *p)
5730 {
5731 }
5732 
5733 static inline void hrtick_update(struct rq *rq)
5734 {
5735 }
5736 #endif
5737 
5738 #ifdef CONFIG_SMP
5739 static inline bool cpu_overutilized(int cpu)
5740 {
5741     return !fits_capacity(cpu_util(cpu), capacity_of(cpu));
5742 }
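
/*
 * Note, assuming the usual fits_capacity() definition of
 * "util * 1280 < capacity * 1024" (roughly an 80% margin): a CPU with
 * capacity 1024 counts as overutilized once cpu_util() exceeds ~819,
 * leaving headroom before the CPU actually saturates.
 */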
5743 
5744 static inline void update_overutilized_status(struct rq *rq)
5745 {
5746     if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
5747         WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
5748         trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
5749     }
5750 }
5751 #else
5752 static inline void update_overutilized_status(struct rq *rq)
5753 {
5754 }
5755 #endif
5756 
5757 /* Runqueue only has SCHED_IDLE tasks enqueued */
5758 static int sched_idle_rq(struct rq *rq)
5759 {
5760     return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running && rq->nr_running);
5761 }
5762 
5763 #ifdef CONFIG_SMP
5764 static int sched_idle_cpu(int cpu)
5765 {
5766     return sched_idle_rq(cpu_rq(cpu));
5767 }
5768 #endif
5769 
5770 static void set_next_buddy(struct sched_entity *se);
5771 #ifdef CONFIG_SCHED_LATENCY_NICE
5772 static void check_preempt_from_idle(struct cfs_rq *cfs, struct sched_entity *se)
5773 {
5774     struct sched_entity *next;
5775     if (se->latency_weight <= 0)
5776         return;
5777     if (cfs->nr_running <= 1)
5778         return;
5779     if (cfs->next)
5780         next = cfs->next;
5781     else
5782         next = __pick_first_entity(cfs);
5783     if (next && wakeup_preempt_entity(next, se) == 1)
5784         set_next_buddy(se);
5785 }
5786 #endif
5787 /*
5788  * The enqueue_task method is called before nr_running is
5789  * increased. Here we update the fair scheduling stats and
5790  * then put the task into the rbtree:
5791  */
5792 static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
5793 {
5794     struct cfs_rq *cfs_rq;
5795     struct sched_entity *se = &p->se;
5796     int idle_h_nr_running = task_has_idle_policy(p);
5797     int task_new = !(flags & ENQUEUE_WAKEUP);
5798 
5799     /*
5800      * The code below (indirectly) updates schedutil which looks at
5801      * the cfs_rq utilization to select a frequency.
5802      * Let's add the task's estimated utilization to the cfs_rq's
5803      * estimated utilization, before we update schedutil.
5804      */
5805     util_est_enqueue(&rq->cfs, p);
5806 
5807     /*
5808      * If in_iowait is set, the code below may not trigger any cpufreq
5809      * utilization updates, so do it here explicitly with the IOWAIT flag
5810      * passed.
5811      */
5812     if (p->in_iowait) {
5813         cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
5814     }
5815 
5816     for_each_sched_entity(se) {
5817         if (se->on_rq) {
5818             break;
5819         }
5820         cfs_rq = cfs_rq_of(se);
5821         enqueue_entity(cfs_rq, se, flags);
5822 
5823         cfs_rq->h_nr_running++;
5824         cfs_rq->idle_h_nr_running += idle_h_nr_running;
5825         walt_inc_cfs_rq_stats(cfs_rq, p);
5826 
5827         /* end evaluation on encountering a throttled cfs_rq */
5828         if (cfs_rq_throttled(cfs_rq)) {
5829             goto enqueue_throttle;
5830         }
5831 
5832         flags = ENQUEUE_WAKEUP;
5833     }
5834 
5835     for_each_sched_entity(se) {
5836         cfs_rq = cfs_rq_of(se);
5837 
5838         update_load_avg(cfs_rq, se, UPDATE_TG);
5839         se_update_runnable(se);
5840         update_cfs_group(se);
5841 
5842         cfs_rq->h_nr_running++;
5843         cfs_rq->idle_h_nr_running += idle_h_nr_running;
5844         walt_inc_cfs_rq_stats(cfs_rq, p);
5845 
5846         /* end evaluation on encountering a throttled cfs_rq */
5847         if (cfs_rq_throttled(cfs_rq)) {
5848             goto enqueue_throttle;
5849         }
5850 
5851         /*
5852          * One parent has been throttled and cfs_rq removed from the
5853          * list. Add it back to not break the leaf list.
5854          */
5855         if (throttled_hierarchy(cfs_rq)) {
5856             list_add_leaf_cfs_rq(cfs_rq);
5857         }
5858     }
5859 
5860     /* At this point se is NULL and we are at root level */
5861     add_nr_running(rq, 1);
5862     inc_rq_walt_stats(rq, p);
5863     /*
5864      * Since new tasks are assigned an initial util_avg equal to
5865      * half of the spare capacity of their CPU, tiny tasks have the
5866      * ability to cross the overutilized threshold, which will
5867      * result in the load balancer ruining all the task placement
5868      * done by EAS. As a way to mitigate that effect, do not account
5869      * for the first enqueue operation of new tasks during the
5870      * overutilized flag detection.
5871      *
5872      * A better way of solving this problem would be to wait for
5873      * the PELT signals of tasks to converge before taking them
5874      * into account, but that is not straightforward to implement,
5875      * and the following generally works well enough in practice.
5876      */
5877     if (!task_new) {
5878         update_overutilized_status(rq);
5879     }
5880 #ifdef CONFIG_SCHED_LATENCY_NICE
5881     if (rq->curr == rq->idle)
5882         check_preempt_from_idle(cfs_rq_of(&p->se), &p->se);
5883 #endif
5884 
5885 enqueue_throttle:
5886     if (cfs_bandwidth_used()) {
5887         /*
5888          * When bandwidth control is enabled; the cfs_rq_throttled()
5889          * breaks in the above iteration can result in incomplete
5890          * leaf list maintenance, resulting in triggering the assertion
5891          * below.
5892          */
5893         for_each_sched_entity(se) {
5894             cfs_rq = cfs_rq_of(se);
5895             if (list_add_leaf_cfs_rq(cfs_rq)) {
5896                 break;
5897             }
5898         }
5899     }
5900 
5901     assert_list_leaf_cfs_rq(rq);
5902 
5903     hrtick_update(rq);
5904 }
5905 
5906 
5907 /*
5908  * The dequeue_task method is called before nr_running is
5909  * decreased. We remove the task from the rbtree and
5910  * update the fair scheduling stats
5911  */
5912 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
5913 {
5914     struct cfs_rq *cfs_rq;
5915     struct sched_entity *se = &p->se;
5916     int task_sleep = flags & DEQUEUE_SLEEP;
5917     int idle_h_nr_running = task_has_idle_policy(p);
5918     bool was_sched_idle = sched_idle_rq(rq);
5919 
5920     util_est_dequeue(&rq->cfs, p);
5921 
5922     for_each_sched_entity(se) {
5923         cfs_rq = cfs_rq_of(se);
5924         dequeue_entity(cfs_rq, se, flags);
5925 
5926         cfs_rq->h_nr_running--;
5927         cfs_rq->idle_h_nr_running -= idle_h_nr_running;
5928         walt_dec_cfs_rq_stats(cfs_rq, p);
5929 
5930         /* end evaluation on encountering a throttled cfs_rq */
5931         if (cfs_rq_throttled(cfs_rq)) {
5932             goto dequeue_throttle;
5933         }
5934 
5935         /* Don't dequeue parent if it has other entities besides us */
5936         if (cfs_rq->load.weight) {
5937             /* Avoid re-evaluating load for this entity: */
5938             se = parent_entity(se);
5939             /*
5940              * Bias pick_next to pick a task from this cfs_rq, as
5941              * p is sleeping when it is within its sched_slice.
5942              */
5943             if (task_sleep && se && !throttled_hierarchy(cfs_rq)) {
5944                 set_next_buddy(se);
5945             }
5946             break;
5947         }
5948         flags |= DEQUEUE_SLEEP;
5949     }
5950 
5951     for_each_sched_entity(se) {
5952         cfs_rq = cfs_rq_of(se);
5953 
5954         update_load_avg(cfs_rq, se, UPDATE_TG);
5955         se_update_runnable(se);
5956         update_cfs_group(se);
5957 
5958         cfs_rq->h_nr_running--;
5959         cfs_rq->idle_h_nr_running -= idle_h_nr_running;
5960         walt_dec_cfs_rq_stats(cfs_rq, p);
5961 
5962         /* end evaluation on encountering a throttled cfs_rq */
5963         if (cfs_rq_throttled(cfs_rq)) {
5964             goto dequeue_throttle;
5965         }
5966     }
5967 
5968     /* At this point se is NULL and we are at root level */
5969     sub_nr_running(rq, 1);
5970     dec_rq_walt_stats(rq, p);
5971 
5972     /* balance early to pull high priority tasks */
5973     if (unlikely(!was_sched_idle && sched_idle_rq(rq))) {
5974         rq->next_balance = jiffies;
5975     }
5976 
5977 dequeue_throttle:
5978     util_est_update(&rq->cfs, p, task_sleep);
5979     hrtick_update(rq);
5980 }
5981 
5982 #ifdef CONFIG_SMP
5983 
5984 /* Working cpumask for: load_balance, load_balance_newidle. */
5985 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
5986 DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
5987 
5988 #ifdef CONFIG_NO_HZ_COMMON
5989 
5990 static struct {
5991     cpumask_var_t idle_cpus_mask;
5992     atomic_t nr_cpus;
5993     int has_blocked;            /* Idle CPUs have blocked load */
5994     unsigned long next_balance; /* in jiffy units */
5995     unsigned long next_blocked; /* Next update of blocked load in jiffies */
5996 } nohz ____cacheline_aligned;
5997 
5998 #endif /* CONFIG_NO_HZ_COMMON */
5999 
6000 static unsigned long cpu_load(struct rq *rq)
6001 {
6002     return cfs_rq_load_avg(&rq->cfs);
6003 }
6004 
6005 /*
6006  * cpu_load_without - compute CPU load without any contributions from *p
6007  * @cpu: the CPU which load is requested
6008  * @p: the task which load should be discounted
6009  *
6010  * The load of a CPU is defined by the load of tasks currently enqueued on that
6011  * CPU as well as tasks which are currently sleeping after an execution on that
6012  * CPU.
6013  *
6014  * This method returns the load of the specified CPU by discounting the load of
6015  * the specified task, whenever the task is currently contributing to the CPU
6016  * load.
6017  */
6018 static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p)
6019 {
6020     struct cfs_rq *cfs_rq;
6021     unsigned int load;
6022 
6023     /* Task has no contribution or is new */
6024     if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) {
6025         return cpu_load(rq);
6026     }
6027 
6028     cfs_rq = &rq->cfs;
6029     load = READ_ONCE(cfs_rq->avg.load_avg);
6030 
6031     /* Discount task's load from CPU's load */
6032     lsub_positive(&load, task_h_load(p));
6033 
6034     return load;
6035 }
6036 
6037 static unsigned long cpu_runnable(struct rq *rq)
6038 {
6039     return cfs_rq_runnable_avg(&rq->cfs);
6040 }
6041 
6042 static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p)
6043 {
6044     struct cfs_rq *cfs_rq;
6045     unsigned int runnable;
6046 
6047     /* Task has no contribution or is new */
6048     if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) {
6049         return cpu_runnable(rq);
6050     }
6051 
6052     cfs_rq = &rq->cfs;
6053     runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
6054 
6055     /* Discount task's runnable from CPU's runnable */
6056     lsub_positive(&runnable, p->se.avg.runnable_avg);
6057 
6058     return runnable;
6059 }
6060 
6061 static void record_wakee(struct task_struct *p)
6062 {
6063     /*
6064      * Only decay a single time; tasks that have less than 1 wakeup per
6065      * jiffy will not have built up many flips.
6066      */
6067     if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
6068         current->wakee_flips >>= 1;
6069         current->wakee_flip_decay_ts = jiffies;
6070     }
6071 
6072     if (current->last_wakee != p) {
6073         current->last_wakee = p;
6074         current->wakee_flips++;
6075     }
6076 }
6077 
6078 /*
6079  * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
6080  *
6081  * A waker of many should wake a different task than the one last awakened
6082  * at a frequency roughly N times higher than one of its wakees.
6083  *
6084  * In order to determine whether we should let the load spread vs consolidate
6085  * to shared cache, we look for a minimum 'flip' frequency of llc_size in one
6086  * partner, and a factor of llc_size higher frequency in the other.
6087  *
6088  * With both conditions met, we can be relatively sure that the relationship is
6089  * non-monogamous, with partner count exceeding socket size.
6090  *
6091  * Waker/wakee being client/server, worker/dispatcher, interrupt source or
6092  * whatever is irrelevant; the spread criterion is simply that the apparent
6093  * partner count exceeds the socket size.
6094  */
6095 static int wake_wide(struct task_struct *p)
6096 {
6097     unsigned int master = current->wakee_flips;
6098     unsigned int slave = p->wakee_flips;
6099     int factor = __this_cpu_read(sd_llc_size);
6100 
6101     if (master < slave) {
6102         swap(master, slave);
6103     }
6104     if (slave < factor || master < slave * factor) {
6105         return 0;
6106     }
6107     return 1;
6108 }
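
/*
 * Worked example (hypothetical numbers): with an LLC spanning 4 CPUs
 * (factor = 4), a waker with wakee_flips = 20 waking a task with
 * wakee_flips = 5 gives master = 20, slave = 5. Since slave >= factor
 * and master >= slave * factor (20 >= 20), wake_wide() returns 1 and the
 * wakeup is spread rather than pulled onto the waker's cache domain.
 * With slave = 3 (< factor) it would return 0 instead.
 */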
6109 
6110 /*
6111  * The purpose of wake_affine() is to quickly determine on which CPU we can run
6112  * soonest. For the purpose of speed we only consider the waking and previous
6113  * CPU.
6114  *
6115  * wake_affine_idle() - only considers 'now': it checks if the waking CPU is
6116  *            cache-affine and is (or will be) idle.
6117  *
6118  * wake_affine_weight() - considers the weight to reflect the average
6119  *              scheduling latency of the CPUs. This seems to work
6120  *              for the overloaded case.
6121  */
6122 static int wake_affine_idle(int this_cpu, int prev_cpu, int sync)
6123 {
6124     /*
6125      * If this_cpu is idle, it implies the wakeup is from interrupt
6126      * context. Only allow the move if cache is shared. Otherwise an
6127      * interrupt intensive workload could force all tasks onto one
6128      * node depending on the IO topology or IRQ affinity settings.
6129      *
6130      * If the prev_cpu is idle and cache affine then avoid a migration.
6131      * There is no guarantee that the cache hot data from an interrupt
6132      * is more important than cache hot data on the prev_cpu and from
6133      * a cpufreq perspective, it's better to have higher utilisation
6134      * on one CPU.
6135      */
6136     if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu)) {
6137         return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
6138     }
6139 
6140     if (sync && cpu_rq(this_cpu)->nr_running == 1) {
6141         return this_cpu;
6142     }
6143 
6144     return nr_cpumask_bits;
6145 }
6146 
6147 static int wake_affine_weight(struct sched_domain *sd, struct task_struct *p, int this_cpu, int prev_cpu, int sync)
6148 {
6149     s64 this_eff_load, prev_eff_load;
6150     unsigned long task_load;
6151 
6152     this_eff_load = cpu_load(cpu_rq(this_cpu));
6153 
6154     if (sync) {
6155         unsigned long current_load = task_h_load(current);
6156         if (current_load > this_eff_load) {
6157             return this_cpu;
6158         }
6159 
6160         this_eff_load -= current_load;
6161     }
6162 
6163     task_load = task_h_load(p);
6164 
6165     this_eff_load += task_load;
6166     if (sched_feat(WA_BIAS)) {
6167         this_eff_load *= FAIR_ONEHUNDRED;
6168     }
6169     this_eff_load *= capacity_of(prev_cpu);
6170 
6171     prev_eff_load = cpu_load(cpu_rq(prev_cpu));
6172     prev_eff_load -= task_load;
6173     if (sched_feat(WA_BIAS)) {
6174         prev_eff_load *= FAIR_ONEHUNDRED + (sd->imbalance_pct - FAIR_ONEHUNDRED) / 0x2;
6175     }
6176     prev_eff_load *= capacity_of(this_cpu);
6177 
6178     /*
6179      * If sync, adjust the weight of prev_eff_load such that if
6180      * prev_eff == this_eff that select_idle_sibling() will consider
6181      * stacking the wakee on top of the waker if no other CPU is
6182      * idle.
6183      */
6184     if (sync) {
6185         prev_eff_load += 1;
6186     }
6187 
6188     return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
6189 }
6190 
6191 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int this_cpu, int prev_cpu, int sync)
6192 {
6193     int target = nr_cpumask_bits;
6194 
6195     if (sched_feat(WA_IDLE)) {
6196         target = wake_affine_idle(this_cpu, prev_cpu, sync);
6197     }
6198 
6199     if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits) {
6200         target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
6201     }
6202 
6203     schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
6204     if (target == nr_cpumask_bits) {
6205         return prev_cpu;
6206     }
6207 
6208     schedstat_inc(sd->ttwu_move_affine);
6209     schedstat_inc(p->se.statistics.nr_wakeups_affine);
6210     return target;
6211 }
6212 
6213 static struct sched_group *find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
6214 
6215 /*
6216  * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group.
6217  */
6218 static int find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
6219 {
6220     unsigned long load, min_load = ULONG_MAX;
6221     unsigned int min_exit_latency = UINT_MAX;
6222     u64 latest_idle_timestamp = 0;
6223     int least_loaded_cpu = this_cpu;
6224     int shallowest_idle_cpu = -1;
6225     int i;
6226 
6227     /* Check if we have any choice: */
6228     if (group->group_weight == 1) {
6229         return cpumask_first(sched_group_span(group));
6230     }
6231 
6232     /* Traverse only the allowed CPUs */
6233     for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr)
6234     {
6235         if (cpu_isolated(i)) {
6236             continue;
6237         }
6238 
6239         if (sched_idle_cpu(i)) {
6240             return i;
6241         }
6242 
6243         if (available_idle_cpu(i)) {
6244             struct rq *rq = cpu_rq(i);
6245             struct cpuidle_state *idle = idle_get_state(rq);
6246             if (idle && idle->exit_latency < min_exit_latency) {
6247                 /*
6248                  * We give priority to a CPU whose idle state
6249                  * has the smallest exit latency irrespective
6250                  * of any idle timestamp.
6251                  */
6252                 min_exit_latency = idle->exit_latency;
6253                 latest_idle_timestamp = rq->idle_stamp;
6254                 shallowest_idle_cpu = i;
6255             } else if ((!idle || idle->exit_latency == min_exit_latency) && rq->idle_stamp > latest_idle_timestamp) {
6256                 /*
6257                  * If equal or no active idle state, then
6258                  * the most recently idled CPU might have
6259                  * a warmer cache.
6260                  */
6261                 latest_idle_timestamp = rq->idle_stamp;
6262                 shallowest_idle_cpu = i;
6263             }
6264         } else if (shallowest_idle_cpu == -1) {
6265             load = cpu_load(cpu_rq(i));
6266             if (load < min_load) {
6267                 min_load = load;
6268                 least_loaded_cpu = i;
6269             }
6270         }
6271     }
6272 
6273     return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
6274 }
6275 
6276 static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, int cpu, int prev_cpu, int sd_flag)
6277 {
6278     int new_cpu = cpu;
6279 
6280     if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) {
6281         return prev_cpu;
6282     }
6283 
6284     /*
6285      * We need task's util for cpu_util_without, sync it up to
6286      * prev_cpu's last_update_time.
6287      */
6288     if (!(sd_flag & SD_BALANCE_FORK)) {
6289         sync_entity_load_avg(&p->se);
6290     }
6291 
6292     while (sd) {
6293         struct sched_group *group;
6294         struct sched_domain *tmp;
6295         int weight;
6296 
6297         if (!(sd->flags & sd_flag)) {
6298             sd = sd->child;
6299             continue;
6300         }
6301 
6302         group = find_idlest_group(sd, p, cpu);
6303         if (!group) {
6304             sd = sd->child;
6305             continue;
6306         }
6307 
6308         new_cpu = find_idlest_group_cpu(group, p, cpu);
6309         if (new_cpu == cpu) {
6310             /* Now try balancing at a lower domain level of 'cpu': */
6311             sd = sd->child;
6312             continue;
6313         }
6314 
6315         /* Now try balancing at a lower domain level of 'new_cpu': */
6316         cpu = new_cpu;
6317         weight = sd->span_weight;
6318         sd = NULL;
6319         for_each_domain(cpu, tmp)
6320         {
6321             if (weight <= tmp->span_weight) {
6322                 break;
6323             }
6324             if (tmp->flags & sd_flag) {
6325                 sd = tmp;
6326             }
6327         }
6328     }
6329 
6330     return new_cpu;
6331 }
6332 
6333 #ifdef CONFIG_SCHED_SMT
6334 DEFINE_STATIC_KEY_FALSE(sched_smt_present);
6335 EXPORT_SYMBOL_GPL(sched_smt_present);
6336 
6337 static inline void set_idle_cores(int cpu, int val)
6338 {
6339     struct sched_domain_shared *sds;
6340 
6341     sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
6342     if (sds) {
6343         WRITE_ONCE(sds->has_idle_cores, val);
6344     }
6345 }
6346 
6347 static inline bool test_idle_cores(int cpu, bool def)
6348 {
6349     struct sched_domain_shared *sds;
6350 
6351     sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
6352     if (sds) {
6353         return READ_ONCE(sds->has_idle_cores);
6354     }
6355 
6356     return def;
6357 }
6358 
6359 /*
6360  * Scans the local SMT mask to see if the entire core is idle, and records this
6361  * information in sd_llc_shared->has_idle_cores.
6362  *
6363  * Since SMT siblings share all cache levels, inspecting this limited remote
6364  * state should be fairly cheap.
6365  */
6366 void fair_update_idle_core(struct rq *rq)
6367 {
6368     int core = cpu_of(rq);
6369     int cpu;
6370 
6371     rcu_read_lock();
6372     if (test_idle_cores(core, true)) {
6373         goto unlock;
6374     }
6375 
6376     for_each_cpu(cpu, cpu_smt_mask(core))
6377     {
6378         if (cpu == core) {
6379             continue;
6380         }
6381 
6382         if (!available_idle_cpu(cpu)) {
6383             goto unlock;
6384         }
6385     }
6386 
6387     set_idle_cores(core, 1);
6388 unlock:
6389     rcu_read_unlock();
6390 }
6391 
6392 /*
6393  * Scan the entire LLC domain for idle cores; this dynamically switches off if
6394  * there are no idle cores left in the system; tracked through
6395  * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
6396  */
6397 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
6398 {
6399     struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
6400     int core, cpu;
6401 
6402     if (!static_branch_likely(&sched_smt_present)) {
6403         return -1;
6404     }
6405 
6406     if (!test_idle_cores(target, false)) {
6407         return -1;
6408     }
6409 
6410     cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
6411 #ifdef CONFIG_CPU_ISOLATION_OPT
6412     cpumask_andnot(cpus, cpus, cpu_isolated_mask);
6413 #endif
6414 
6415     for_each_cpu_wrap(core, cpus, target)
6416     {
6417         bool idle = true;
6418 
6419         for_each_cpu(cpu, cpu_smt_mask(core))
6420         {
6421             if (!available_idle_cpu(cpu)) {
6422                 idle = false;
6423                 break;
6424             }
6425         }
6426         cpumask_andnot(cpus, cpus, cpu_smt_mask(core));
6427 
6428         if (idle) {
6429             return core;
6430         }
6431     }
6432 
6433     /*
6434      * Failed to find an idle core; stop looking for one.
6435      */
6436     set_idle_cores(target, 0);
6437 
6438     return -1;
6439 }
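/*
 * Note that the cpumask_andnot() above strips every sibling of an inspected
 * core from 'cpus', so each SMT thread is examined at most once per scan no
 * matter which sibling for_each_cpu_wrap() visits first.
 */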
6440 
6441 /*
6442  * Scan the local SMT mask for idle CPUs.
6443  */
6444 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
6445 {
6446     int cpu;
6447 
6448     if (!static_branch_likely(&sched_smt_present)) {
6449         return -1;
6450     }
6451 
6452     for_each_cpu(cpu, cpu_smt_mask(target))
6453     {
6454         if (!cpumask_test_cpu(cpu, p->cpus_ptr) || !cpumask_test_cpu(cpu, sched_domain_span(sd))) {
6455             continue;
6456         }
6457         if (cpu_isolated(cpu)) {
6458             continue;
6459         }
6460         if (available_idle_cpu(cpu) || sched_idle_cpu(cpu)) {
6461             return cpu;
6462         }
6463     }
6464 
6465     return -1;
6466 }
6467 
6468 #else /* CONFIG_SCHED_SMT */
6469 
6470 static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
6471 {
6472     return -1;
6473 }
6474 
6475 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
6476 {
6477     return -1;
6478 }
6479 
6480 #endif /* CONFIG_SCHED_SMT */
6481 
6482 /*
6483  * Scan the LLC domain for idle CPUs; this is dynamically regulated by
6484  * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
6485  * average idle time for this rq (as found in rq->avg_idle).
6486  */
6487 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
6488 {
6489     struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
6490     struct sched_domain *this_sd;
6491     u64 avg_cost, avg_idle;
6492     u64 time;
6493     int this = smp_processor_id();
6494     int cpu, nr = INT_MAX;
6495 
6496     this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
6497     if (!this_sd) {
6498         return -1;
6499     }
6500 
6501     /*
6502      * Due to large variance we need a large fuzz factor; hackbench in
6503      * particular is sensitive here.
6504      */
6505     avg_idle = this_rq()->avg_idle / 0x200;
6506     avg_cost = this_sd->avg_scan_cost + 1;
6507 
6508     if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost) {
6509         return -1;
6510     }
6511 
6512     if (sched_feat(SIS_PROP)) {
6513         u64 span_avg = sd->span_weight * avg_idle;
6514         if (span_avg > 0x4 * avg_cost) {
6515             nr = div_u64(span_avg, avg_cost);
6516         } else {
6517             nr = 0x4;
6518         }
6519     }
6520 
6521     time = cpu_clock(this);
6522 
6523     cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
6524 
6525     for_each_cpu_wrap(cpu, cpus, target)
6526     {
6527         if (!--nr) {
6528             return -1;
6529         }
6530         if (cpu_isolated(cpu)) {
6531             continue;
6532         }
6533         if (available_idle_cpu(cpu) || sched_idle_cpu(cpu)) {
6534             break;
6535         }
6536     }
6537 
6538     time = cpu_clock(this) - time;
6539     update_avg(&this_sd->avg_scan_cost, time);
6540 
6541     return cpu;
6542 }
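/*
 * Worked example of the SIS_PROP limit (illustrative numbers): with
 * rq->avg_idle = 512us, the fuzzed avg_idle is 512000 / 512 = 1000ns. For
 * an 8-CPU LLC with avg_scan_cost around 250ns, span_avg = 8 * 1000 = 8000
 * exceeds 4 * 251, so nr = 8000 / 251 = 31 and the scan is effectively
 * unbounded. With avg_idle = 128us instead, span_avg = 8 * 250 = 2000 and
 * nr = 2000 / 251 = 7, so at most seven CPUs are probed before giving up.
 */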
6543 
6544 /*
6545  * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which
6546  * the task fits. If no CPU is big enough, but there are idle ones, try to
6547  * maximize capacity.
6548  */
6549 static int select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
6550 {
6551     unsigned long task_util, best_cap = 0;
6552     int cpu, best_cpu = -1;
6553     struct cpumask *cpus;
6554 
6555     cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
6556     cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
6557 
6558     task_util = uclamp_task_util(p);
6559 
6560     for_each_cpu_wrap(cpu, cpus, target)
6561     {
6562         unsigned long cpu_cap = capacity_of(cpu);
6563 
6564         if (cpu_isolated(cpu)) {
6565             continue;
6566         }
6567 
6568         if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu)) {
6569             continue;
6570         }
6571         if (fits_capacity(task_util, cpu_cap)) {
6572             return cpu;
6573         }
6574 
6575         if (cpu_cap > best_cap) {
6576             best_cap = cpu_cap;
6577             best_cpu = cpu;
6578         }
6579     }
6580 
6581     return best_cpu;
6582 }
6583 
6584 static inline bool asym_fits_capacity(int task_util, int cpu)
6585 {
6586     if (static_branch_unlikely(&sched_asym_cpucapacity)) {
6587         return fits_capacity(task_util, capacity_of(cpu));
6588     }
6589 
6590     return true;
6591 }
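/*
 * fits_capacity() keeps roughly 20% headroom; in mainline it is defined as
 * util * 1280 < capacity * 1024. E.g. a task_util of 600 fits a
 * 1024-capacity CPU (768000 < 1048576) but not a 640-capacity one
 * (768000 > 655360), which steers mid-size tasks away from little cores.
 */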
6592 
6593 /*
6594  * Try and locate an idle core/thread in the LLC cache domain.
6595  */
6596 static int select_idle_sibling(struct task_struct *p, int prev, int target)
6597 {
6598     struct sched_domain *sd;
6599     unsigned long task_util;
6600     int i, recent_used_cpu;
6601 
6602     /*
6603      * On asymmetric systems, update the task utilization because we will
6604      * check that the task fits the CPU's capacity.
6605      */
6606     if (static_branch_unlikely(&sched_asym_cpucapacity)) {
6607         sync_entity_load_avg(&p->se);
6608         task_util = uclamp_task_util(p);
6609     }
6610 
6611     if ((available_idle_cpu(target) || sched_idle_cpu(target)) && !cpu_isolated(target) &&
6612         asym_fits_capacity(task_util, target)) {
6613         return target;
6614     }
6615 
6616     /*
6617      * If the previous CPU is cache affine and idle, don't be stupid:
6618      */
6619     if (prev != target && cpus_share_cache(prev, target) &&
6620         ((available_idle_cpu(prev) || sched_idle_cpu(prev)) && !cpu_isolated(prev) &&
6621          asym_fits_capacity(task_util, prev))) {
6622         return prev;
6623     }
6624 
6625     if (is_per_cpu_kthread(current) &&
6626         in_task() &&
6627         prev == smp_processor_id() &&
6628         this_rq()->nr_running <= 1 &&
6629         asym_fits_capacity(task_util, prev)) {
6630         return prev;
6631     }
6632 
6633     /* Check a recently used CPU as a potential idle candidate: */
6634     recent_used_cpu = p->recent_used_cpu;
6635     if (recent_used_cpu != prev && recent_used_cpu != target && cpus_share_cache(recent_used_cpu, target) &&
6636         (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
6637         cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) && asym_fits_capacity(task_util, recent_used_cpu)) {
6638         p->recent_used_cpu = prev;
6639         return recent_used_cpu;
6640     }
6641 
6642     if (static_branch_unlikely(&sched_asym_cpucapacity)) {
6643         sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
6644         if (sd) {
6645             i = select_idle_capacity(p, sd, target);
6646             return ((unsigned)i < nr_cpumask_bits) ? i : target;
6647         }
6648     }
6649 
6650     sd = rcu_dereference(per_cpu(sd_llc, target));
6651     if (!sd) {
6652         return target;
6653     }
6654 
6655     i = select_idle_core(p, sd, target);
6656     if ((unsigned)i < nr_cpumask_bits) {
6657         return i;
6658     }
6659 
6660     i = select_idle_cpu(p, sd, target);
6661     if ((unsigned)i < nr_cpumask_bits) {
6662         return i;
6663     }
6664 
6665     i = select_idle_smt(p, sd, target);
6666     if ((unsigned)i < nr_cpumask_bits) {
6667         return i;
6668     }
6669 
6670     return target;
6671 }
6672 
6673 /**
6674  * Amount of capacity of a CPU that is (estimated to be) used by CFS tasks
6675  * @cpu: the CPU to get the utilization of
6676  *
6677  * The unit of the return value must be the one of capacity so we can compare
6678  * the utilization with the capacity of the CPU that is available for CFS
6679  * tasks (i.e. cpu_capacity).
6680  *
6681  * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
6682  * recent utilization of currently non-runnable tasks on a CPU. It represents
6683  * the amount of utilization of a CPU in the range [0..capacity_orig] where
6684  * capacity_orig is the cpu_capacity available at the highest frequency
6685  * (arch_scale_freq_capacity()).
6686  * The utilization of a CPU converges towards a sum equal to or less than the
6687  * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
6688  * the running time on this CPU scaled by capacity_curr.
6689  *
6690  * The estimated utilization of a CPU is defined to be the maximum between its
6691  * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks
6692  * currently RUNNABLE on that CPU.
6693  * This allows us to properly represent the expected utilization of a CPU
6694  * which has just picked up a big task after a long sleep period. At the same time
6695  * however it preserves the benefits of the "blocked utilization" in
6696  * describing the potential for other tasks waking up on the same CPU.
6697  *
6698  * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
6699  * higher than capacity_orig because of unfortunate rounding in
6700  * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
6701  * the average stabilizes with the new running time. We need to check that the
6702  * utilization stays within the range of [0..capacity_orig] and cap it if
6703  * necessary. Without utilization capping, a group could be seen as overloaded
6704  * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
6705  * available capacity. We allow utilization to overshoot capacity_curr (but not
6706  * capacity_orig) as it is useful for predicting the capacity required after task
6707  * migrations (scheduler-driven DVFS).
6708  *
6709  * Return: the (estimated) utilization for the specified CPU
6710  */
6711 unsigned long cpu_util(int cpu)
6712 {
6713     struct cfs_rq *cfs_rq;
6714     unsigned int util;
6715 
6716 #ifdef CONFIG_SCHED_WALT
6717     if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util)) {
6718         u64 walt_cpu_util = cpu_rq(cpu)->walt_stats.cumulative_runnable_avg_scaled;
6719 
6720         return min_t(unsigned long, walt_cpu_util, capacity_orig_of(cpu));
6721     }
6722 #endif
6723 
6724     cfs_rq = &cpu_rq(cpu)->cfs;
6725     util = READ_ONCE(cfs_rq->avg.util_avg);
6726 
6727     if (sched_feat(UTIL_EST)) {
6728         util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
6729     }
6730 
6731     return min_t(unsigned long, util, capacity_orig_of(cpu));
6732 }
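/*
 * Example: with cfs_rq->avg.util_avg = 300 and util_est.enqueued = 450,
 * cpu_util() returns 450 when UTIL_EST is enabled (300 otherwise), clamped
 * to capacity_orig_of(cpu) either way. Under WALT the window-based
 * cumulative_runnable_avg_scaled is used instead, with the same clamp.
 */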
6733 
6734 /*
6735  * cpu_util_without: compute cpu utilization without any contributions from *p
6736  * @cpu: the CPU which utilization is requested
6737  * @p: the task which utilization should be discounted
6738  *
6739  * The utilization of a CPU is defined by the utilization of tasks currently
6740  * enqueued on that CPU as well as tasks which are currently sleeping after an
6741  * execution on that CPU.
6742  *
6743  * This method returns the utilization of the specified CPU by discounting the
6744  * utilization of the specified task, whenever the task is currently
6745  * contributing to the CPU utilization.
6746  */
6747 static unsigned long cpu_util_without(int cpu, struct task_struct *p)
6748 {
6749     struct cfs_rq *cfs_rq;
6750     unsigned int util;
6751 
6752 #ifdef CONFIG_SCHED_WALT
6753     /*
6754      * WALT does not decay idle tasks in the same manner
6755      * as PELT, so it makes little sense to subtract task
6756      * utilization from cpu utilization. Instead just use
6757      * cpu_util for this case.
6758      */
6759     if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util) && p->state == TASK_WAKING) {
6760         return cpu_util(cpu);
6761     }
6762 #endif
6763 
6764     /* Task has no contribution or is new */
6765     if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) {
6766         return cpu_util(cpu);
6767     }
6768 
6769 #ifdef CONFIG_SCHED_WALT
6770     if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util)) {
6771         util = max_t(long, cpu_util(cpu) - task_util(p), 0);
6772         return min_t(unsigned long, util, capacity_orig_of(cpu));
6773     }
6774 #endif
6775 
6776     cfs_rq = &cpu_rq(cpu)->cfs;
6777     util = READ_ONCE(cfs_rq->avg.util_avg);
6778 
6779     /* Discount task's util from CPU's util */
6780     lsub_positive(&util, task_util(p));
6781 
6782     /*
6783      * Covered cases:
6784      *
6785      * a) if *p is the only task sleeping on this CPU, then:
6786      *      cpu_util (== task_util) > util_est (== 0)
6787      *    and thus we return:
6788      *      cpu_util_without = (cpu_util - task_util) = 0
6789      *
6790      * b) if other tasks are SLEEPING on this CPU, which is now exiting
6791      *    IDLE, then:
6792      *      cpu_util >= task_util
6793      *      cpu_util > util_est (== 0)
6794      *    and thus we discount *p's blocked utilization to return:
6795      *      cpu_util_without = (cpu_util - task_util) >= 0
6796      *
6797      * c) if other tasks are RUNNABLE on that CPU and
6798      *      util_est > cpu_util
6799      *    then we use util_est since it returns a more restrictive
6800      *    estimation of the spare capacity on that CPU, by just
6801      *    considering the expected utilization of tasks already
6802      *    runnable on that CPU.
6803      *
6804      * Cases a) and b) are covered by the above code, while case c) is
6805      * covered by the following code when estimated utilization is
6806      * enabled.
6807      */
6808     if (sched_feat(UTIL_EST)) {
6809         unsigned int estimated = READ_ONCE(cfs_rq->avg.util_est.enqueued);
6810 
6811         /*
6812          * Despite the following checks we still have a small window
6813          * for a possible race, when an execl's select_task_rq_fair()
6814          * races with LB's detach_task():
6815          *
6816          *   detach_task()
6817          *     p->on_rq = TASK_ON_RQ_MIGRATING;
6818          *     ---------------------------------- A
6819          *     deactivate_task()                   \
6820          *       dequeue_task()                     + RaceTime
6821          *         util_est_dequeue()              /
6822          *     ---------------------------------- B
6823          *
6824          * The additional check on "current == p" is required to
6825          * properly fix the execl regression and it helps in further
6826          * reducing the chances for the above race.
6827          */
6828         if (unlikely(task_on_rq_queued(p) || current == p)) {
6829             lsub_positive(&estimated, _task_util_est(p));
6830         }
6831 
6832         util = max(util, estimated);
6833     }
6834 
6835     /*
6836      * Utilization (estimated) can exceed the CPU capacity, thus let's
6837      * clamp to the maximum CPU capacity to ensure consistency with
6838      * the cpu_util call.
6839      */
6840     return min_t(unsigned long, util, capacity_orig_of(cpu));
6841 }
6842 
6843 #ifdef CONFIG_SCHED_RTG
6844 unsigned long capacity_spare_without(int cpu, struct task_struct *p)
6845 {
6846     return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0);
6847 }
6848 #endif
6849 /*
6850  * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
6851  * to @dst_cpu.
6852  */
6853 static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu)
6854 {
6855     struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
6856     unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg);
6857 
6858     /*
6859      * If @p migrates from @cpu to another, remove its contribution. Or,
6860      * if @p migrates from another CPU to @cpu, add its contribution. In
6861      * the other cases, @cpu is not impacted by the migration, so the
6862      * util_avg should already be correct.
6863      */
6864     if (task_cpu(p) == cpu && dst_cpu != cpu) {
6865         sub_positive(&util, task_util(p));
6866     } else if (task_cpu(p) != cpu && dst_cpu == cpu) {
6867         util += task_util(p);
6868     }
6869 
6870     if (sched_feat(UTIL_EST)) {
6871         util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
6872 
6873         /*
6874          * During wake-up, the task isn't enqueued yet and doesn't
6875          * appear in the cfs_rq->avg.util_est.enqueued of any rq,
6876          * so just add it (if needed) to "simulate" what will be
6877          * cpu_util() after the task has been enqueued.
6878          */
6879         if (dst_cpu == cpu) {
6880             util_est += _task_util_est(p);
6881         }
6882 
6883         util = max(util, util_est);
6884     }
6885 
6886     return min(util, capacity_orig_of(cpu));
6887 }
6888 
6889 /*
6890  * Returns the current capacity of cpu after applying both
6891  * cpu and freq scaling.
6892  */
6893 unsigned long capacity_curr_of(int cpu)
6894 {
6895     unsigned long max_cap = cpu_rq(cpu)->cpu_capacity_orig;
6896     unsigned long scale_freq = arch_scale_freq_capacity(cpu);
6897 
6898     return cap_scale(max_cap, scale_freq);
6899 }
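/*
 * Example: cap_scale(v, s) is (v * s) >> SCHED_CAPACITY_SHIFT, so a CPU
 * with capacity_orig = 1024 currently running at half of its maximum
 * frequency (arch_scale_freq_capacity() ~= 512) reports
 * capacity_curr_of() = (1024 * 512) >> 10 = 512.
 */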
6900 
6901 /*
6902  * compute_energy(): Estimates the energy that @pd would consume if @p was
6903  * migrated to @dst_cpu. compute_energy() predicts what will be the utilization
6904  * landscape of @pd's CPUs after the task migration, and uses the Energy Model
6905  * to compute what would be the energy if we decided to actually migrate that
6906  * task.
6907  */
6908 static long compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
6909 {
6910     struct cpumask *pd_mask = perf_domain_span(pd);
6911     unsigned long cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask));
6912     unsigned long max_util = 0, sum_util = 0;
6913     int cpu;
6914 
6915     /*
6916      * The capacity state of CPUs of the current rd can be driven by CPUs
6917      * of another rd if they belong to the same pd. So, account for the
6918      * utilization of these CPUs too by masking pd with cpu_online_mask
6919      * instead of the rd span.
6920      *
6921      * If an entire pd is outside of the current rd, it will not appear in
6922      * its pd list and will not be accounted by compute_energy().
6923      */
6924     for_each_cpu_and(cpu, pd_mask, cpu_online_mask)
6925     {
6926         unsigned long cpu_util, util_cfs = cpu_util_next(cpu, p, dst_cpu);
6927         struct task_struct *tsk = cpu == dst_cpu ? p : NULL;
6928 
6929         /*
6930          * Busy time computation: utilization clamping is not
6931          * required since the ratio (sum_util / cpu_capacity)
6932          * is already enough to scale the EM reported power
6933          * consumption at the (eventually clamped) cpu_capacity.
6934          */
6935         sum_util += schedutil_cpu_util(cpu, util_cfs, cpu_cap, ENERGY_UTIL, NULL);
6936 
6937         /*
6938          * Performance domain frequency: utilization clamping
6939          * must be considered since it affects the selection
6940          * of the performance domain frequency.
6941          * NOTE: in case RT tasks are running, by default the
6942          * FREQUENCY_UTIL's utilization can be max OPP.
6943          */
6944         cpu_util = schedutil_cpu_util(cpu, util_cfs, cpu_cap, FREQUENCY_UTIL, tsk);
6945         max_util = max(max_util, cpu_util);
6946     }
6947 
6948     return em_cpu_energy(pd->em_pd, max_util, sum_util);
6949 }
6950 
6951 /*
6952  * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the
6953  * waking task. find_energy_efficient_cpu() looks for the CPU with maximum
6954  * spare capacity in each performance domain and uses it as a potential
6955  * candidate to execute the task. Then, it uses the Energy Model to figure
6956  * out which of the CPU candidates is the most energy-efficient.
6957  *
6958  * The rationale for this heuristic is as follows. In a performance domain,
6959  * all the most energy efficient CPU candidates (according to the Energy
6960  * Model) are those for which we'll request a low frequency. When there are
6961  * several CPUs for which the frequency request will be the same, we don't
6962  * have enough data to break the tie between them, because the Energy Model
6963  * only includes active power costs. With this model, if we assume that
6964  * frequency requests follow utilization (e.g. using schedutil), the CPU with
6965  * the maximum spare capacity in a performance domain is guaranteed to be among
6966  * the best candidates of the performance domain.
6967  *
6968  * In practice, it could be preferable from an energy standpoint to pack
6969  * small tasks on a CPU in order to let other CPUs go in deeper idle states,
6970  * but that could also hurt our chances to go cluster idle, and we have no
6971  * ways to tell with the current Energy Model if this is actually a good
6972  * idea or not. So, find_energy_efficient_cpu() basically favors
6973  * cluster-packing, and spreading inside a cluster. That should at least be
6974  * a good thing for latency, and this is consistent with the idea that most
6975  * of the energy savings of EAS come from the asymmetry of the system, and
6976  * not so much from breaking the tie between identical CPUs. That's also the
6977  * reason why EAS is enabled in the topology code only for systems where
6978  * SD_ASYM_CPUCAPACITY is set.
6979  *
6980  * NOTE: Forkees are not accepted in the energy-aware wake-up path because
6981  * they don't have any useful utilization data yet and it's not possible to
6982  * forecast their impact on energy consumption. Consequently, they will be
6983  * placed by find_idlest_cpu() on the least loaded CPU, which might turn out
6984  * to be energy-inefficient in some use-cases. The alternative would be to
6985  * bias new tasks towards specific types of CPUs first, or to try to infer
6986  * their util_avg from the parent task, but those heuristics could hurt
6987  * other use-cases too. So, until someone finds a better way to solve this,
6988  * let's keep things simple by re-using the existing slow path.
6989  */
6990 static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
6991 {
6992     unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
6993     struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
6994     unsigned long cpu_cap, util, base_energy = 0;
6995     int cpu, best_energy_cpu = prev_cpu;
6996     struct sched_domain *sd;
6997     struct perf_domain *pd;
6998 
6999     rcu_read_lock();
7000     pd = rcu_dereference(rd->pd);
7001     if (!pd || READ_ONCE(rd->overutilized)) {
7002         goto fail;
7003     }
7004 
7005     /*
7006      * Energy-aware wake-up happens on the lowest sched_domain starting
7007      * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
7008      */
7009     sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity));
7010     while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
7011         sd = sd->parent;
7012     }
7013     if (!sd) {
7014         goto fail;
7015     }
7016 
7017     sync_entity_load_avg(&p->se);
7018     if (!task_util_est(p)) {
7019         goto unlock;
7020     }
7021 
7022     for (; pd; pd = pd->next) {
7023         unsigned long cur_delta, spare_cap, max_spare_cap = 0;
7024         unsigned long base_energy_pd;
7025         int max_spare_cap_cpu = -1;
7026 
7027         /* Compute the 'base' energy of the pd, without @p */
7028         base_energy_pd = compute_energy(p, -1, pd);
7029         base_energy += base_energy_pd;
7030 
7031         for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd))
7032         {
7033             if (!cpumask_test_cpu(cpu, p->cpus_ptr)) {
7034                 continue;
7035             }
7036 
7037             util = cpu_util_next(cpu, p, cpu);
7038             cpu_cap = capacity_of(cpu);
7039             spare_cap = cpu_cap;
7040             lsub_positive(&spare_cap, util);
7041 
7042             /*
7043              * Skip CPUs that cannot satisfy the capacity request.
7044              * IOW, placing the task there would make the CPU
7045              * overutilized. Take uclamp into account to see how
7046              * much capacity we can get out of the CPU; this is
7047              * aligned with schedutil_cpu_util().
7048              */
7049             util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
7050             if (!fits_capacity(util, cpu_cap)) {
7051                 continue;
7052             }
7053 
7054             /* Always use prev_cpu as a candidate. */
7055             if (cpu == prev_cpu) {
7056                 prev_delta = compute_energy(p, prev_cpu, pd);
7057                 prev_delta -= base_energy_pd;
7058                 best_delta = min(best_delta, prev_delta);
7059             }
7060 
7061             /*
7062              * Find the CPU with the maximum spare capacity in
7063              * the performance domain
7064              */
7065             if (spare_cap > max_spare_cap) {
7066                 max_spare_cap = spare_cap;
7067                 max_spare_cap_cpu = cpu;
7068             }
7069         }
7070 
7071         /* Evaluate the energy impact of using this CPU. */
7072         if (max_spare_cap_cpu >= 0 && max_spare_cap_cpu != prev_cpu) {
7073             cur_delta = compute_energy(p, max_spare_cap_cpu, pd);
7074             cur_delta -= base_energy_pd;
7075             if (cur_delta < best_delta) {
7076                 best_delta = cur_delta;
7077                 best_energy_cpu = max_spare_cap_cpu;
7078             }
7079         }
7080     }
7081 unlock:
7082     rcu_read_unlock();
7083 
7084     /*
7085      * Pick the best CPU if prev_cpu cannot be used, or if it saves at
7086      * least 6% of the energy used by prev_cpu.
7087      */
7088     if (prev_delta == ULONG_MAX) {
7089         return best_energy_cpu;
7090     }
7091 
7092     if ((prev_delta - best_delta) > ((prev_delta + base_energy) >> 0x4)) {
7093         return best_energy_cpu;
7094     }
7095 
7096     return prev_cpu;
7097 
7098 fail:
7099     rcu_read_unlock();
7100 
7101     return -1;
7102 }
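/*
 * Worked example of the final filter (illustrative EM units): with
 * prev_delta = 120, best_delta = 100 and base_energy = 1000, the required
 * saving is (120 + 1000) >> 4 = 70, so the 20 units actually saved do not
 * justify a migration and prev_cpu is kept. The task only moves when the
 * saving exceeds 1/16 (~6%) of prev_cpu's total estimated energy.
 */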
7103 
7104 /*
7105  * select_task_rq_fair: Select target runqueue for the waking task in domains
7106  * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
7107  * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
7108  *
7109  * Balances load by selecting the idlest CPU in the idlest group, or under
7110  * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set.
7111  *
7112  * Returns the target CPU number.
7113  *
7114  * preempt must be disabled.
7115  */
7116 static int select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
7117 {
7118     struct sched_domain *tmp, *sd = NULL;
7119     int cpu = smp_processor_id();
7120     int new_cpu = prev_cpu;
7121     int want_affine = 0;
7122     int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
7123 #ifdef CONFIG_SCHED_RTG
7124     int target_cpu = find_rtg_cpu(p);
7125 
7126     if (target_cpu >= 0) {
7127         return target_cpu;
7128     }
7129 #endif
7130 
7131     if (sd_flag & SD_BALANCE_WAKE) {
7132         record_wakee(p);
7133 
7134         if (sched_energy_enabled()) {
7135             new_cpu = find_energy_efficient_cpu(p, prev_cpu);
7136             if (new_cpu >= 0) {
7137                 return new_cpu;
7138             }
7139             new_cpu = prev_cpu;
7140         }
7141 
7142         want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
7143     }
7144 
7145     rcu_read_lock();
7146     for_each_domain(cpu, tmp)
7147     {
7148         /*
7149          * If both 'cpu' and 'prev_cpu' are part of this domain,
7150          * cpu is a valid SD_WAKE_AFFINE target.
7151          */
7152         if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
7153             if (cpu != prev_cpu) {
7154                 new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
7155             }
7156 
7157             sd = NULL; /* Prefer wake_affine over balance flags */
7158             break;
7159         }
7160 
7161         if (tmp->flags & sd_flag) {
7162             sd = tmp;
7163         } else if (!want_affine) {
7164             break;
7165         }
7166     }
7167 
7168     if (unlikely(sd)) {
7169         /* Slow path */
7170         new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
7171     } else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
7172         /* Fast path */
7173 
7174         new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
7175 
7176         if (want_affine) {
7177             current->recent_used_cpu = cpu;
7178         }
7179     }
7180     rcu_read_unlock();
7181 
7182     return new_cpu;
7183 }
7184 
7185 static void detach_entity_cfs_rq(struct sched_entity *se);
7186 
7187 /*
7188  * Called immediately before a task is migrated to a new CPU; task_cpu(p) and
7189  * cfs_rq_of(p) references at time of call are still valid and identify the
7190  * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
7191  */
7192 static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
7193 {
7194     /*
7195      * As blocked tasks retain absolute vruntime the migration needs to
7196      * deal with this by subtracting the old and adding the new
7197      * min_vruntime -- the latter is done by enqueue_entity() when placing
7198      * the task on the new runqueue.
7199      */
7200     if (p->state == TASK_WAKING) {
7201         struct sched_entity *se = &p->se;
7202         struct cfs_rq *cfs_rq = cfs_rq_of(se);
7203         u64 min_vruntime;
7204 
7205 #ifndef CONFIG_64BIT
7206         u64 min_vruntime_copy;
7207 
7208         do {
7209             min_vruntime_copy = cfs_rq->min_vruntime_copy;
7210             smp_rmb();
7211             min_vruntime = cfs_rq->min_vruntime;
7212         } while (min_vruntime != min_vruntime_copy);
7213 #else
7214         min_vruntime = cfs_rq->min_vruntime;
7215 #endif
7216 
7217         se->vruntime -= min_vruntime;
7218     }
7219 
7220     if (p->on_rq == TASK_ON_RQ_MIGRATING) {
7221         /*
7222          * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old'
7223          * rq->lock and can modify state directly.
7224          */
7225         lockdep_assert_held(&task_rq(p)->lock);
7226         detach_entity_cfs_rq(&p->se);
7227     } else {
7228         /*
7229          * We are supposed to update the task to "current" time, then
7230          * it's up to date and ready to go to a new CPU/cfs_rq. But we
7231          * have difficulty in getting what the current time is, so simply
7232          * throw away the out-of-date time. This will result in the
7233          * wakee task being less decayed, but giving the wakee more load
7234          * sounds acceptable.
7235          */
7236         remove_entity_load_avg(&p->se);
7237     }
7238 
7239     /* Tell new CPU we are migrated */
7240     p->se.avg.last_update_time = 0;
7241 
7242     /* We have migrated, no longer consider this task hot */
7243     p->se.exec_start = 0;
7244 
7245     update_scan_period(p, new_cpu);
7246 }
7247 
7248 static void task_dead_fair(struct task_struct *p)
7249 {
7250     remove_entity_load_avg(&p->se);
7251 }
7252 
7253 static int balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
7254 {
7255     if (rq->nr_running) {
7256         return 1;
7257     }
7258     return newidle_balance(rq, rf) != 0;
7259 }
7260 #endif /* CONFIG_SMP */
7261 
7262 #ifdef CONFIG_SCHED_LATENCY_NICE
7263 static long wakeup_latency_gran(struct sched_entity *curr, struct sched_entity *se)
7264 {
7265     int latency_weight = se->latency_weight;
7266     long thresh = sysctl_sched_latency;
7267     if ((se->latency_weight > 0) || (curr->latency_weight > 0))
7268         latency_weight -= curr->latency_weight;
7269     if (!latency_weight)
7270         return 0;
7271     if (sched_feat(GENTLE_FAIR_SLEEPERS))
7272         thresh >>= 1;
7273     latency_weight = clamp_t(long, latency_weight,
7274                 -1 * NICE_LATENCY_WEIGHT_MAX,
7275                 NICE_LATENCY_WEIGHT_MAX);
7276     return (thresh * latency_weight) >> NICE_LATENCY_SHIFT;
7277 }
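/*
 * The value returned above biases vdiff in wakeup_preempt_entity() below:
 * a wakee whose latency_weight exceeds curr's gets a positive contribution
 * (it preempts more easily), a less latency-sensitive wakee a negative one.
 * Assuming NICE_LATENCY_WEIGHT_MAX equals 1 << NICE_LATENCY_SHIFT, as the
 * clamp/shift pairing suggests, the bias spans at most +/- thresh, i.e.
 * +/- half of sysctl_sched_latency when GENTLE_FAIR_SLEEPERS is set.
 */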
7278 #endif /* CONFIG_SCHED_LATENCY_NICE */
7279 
7280 static unsigned long wakeup_gran(struct sched_entity *se)
7281 {
7282     unsigned long gran = sysctl_sched_wakeup_granularity;
7283 
7284     /*
7285      * Since it is curr that is running now, convert the gran from
7286      * real-time to virtual-time in its units.
7287      *
7288      * By using 'se' instead of 'curr' we penalize light tasks, so
7289      * they get preempted easier. That is, if 'se' < 'curr' then
7290      * the resulting gran will be larger, therefore penalizing the
7291      * lighter, if otoh 'se' > 'curr' then the resulting gran will
7292      * be smaller, again penalizing the lighter task.
7293      *
7294      * This is especially important for buddies when the leftmost
7295      * task is higher priority than the buddy.
7296      */
7297     return calc_delta_fair(gran, se);
7298 }
7299 
7300 /*
7301  * Should 'se' preempt 'curr'.
7302  *
7303  *             |s1
7304  *        |s2
7305  *   |s3
7306  *         g
7307  *      |<--->|c
7308  *
7309  *  w(c, s1) = -1
7310  *  w(c, s2) =  0
7311  *  w(c, s3) =  1
7312  *
7313  */
7314 static int wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
7315 {
7316     s64 gran, vdiff = curr->vruntime - se->vruntime;
7317 
7318 #ifdef CONFIG_SCHED_LATENCY_NICE
7319     vdiff += wakeup_latency_gran(curr, se);
7320 #endif
7321     if (vdiff <= 0) {
7322         return -1;
7323     }
7324 
7325     gran = wakeup_gran(se);
7326     if (vdiff > gran) {
7327         return 1;
7328     }
7329 
7330     return 0;
7331 }
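/*
 * Example: for a nice-0 wakee, wakeup_gran() is simply
 * sysctl_sched_wakeup_granularity (1ms in mainline before CPU-count
 * scaling). If curr's vruntime leads the wakee's by 2ms, vdiff > gran and
 * we return 1 (preempt); if it leads by only 0.5ms we return 0; and if the
 * wakee is ahead (vdiff <= 0) we return -1 and leave curr running.
 */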
7332 
7333 static void set_last_buddy(struct sched_entity *se)
7334 {
7335     if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) {
7336         return;
7337     }
7338 
7339     for_each_sched_entity(se) {
7340         if (SCHED_WARN_ON(!se->on_rq)) {
7341             return;
7342         }
7343         cfs_rq_of(se)->last = se;
7344     }
7345 }
7346 
7347 static void set_next_buddy(struct sched_entity *se)
7348 {
7349     if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) {
7350         return;
7351     }
7352 
7353     for_each_sched_entity(se) {
7354         if (SCHED_WARN_ON(!se->on_rq)) {
7355             return;
7356         }
7357         cfs_rq_of(se)->next = se;
7358     }
7359 }
7360 
7361 static void set_skip_buddy(struct sched_entity *se)
7362 {
7363     for_each_sched_entity(se) cfs_rq_of(se)->skip = se;
7364 }
7365 
7366 /*
7367  * Preempt the current task with a newly woken task if needed:
7368  */
7369 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
7370 {
7371     struct task_struct *curr = rq->curr;
7372     struct sched_entity *se = &curr->se, *pse = &p->se;
7373     struct cfs_rq *cfs_rq = task_cfs_rq(curr);
7374     int scale = cfs_rq->nr_running >= sched_nr_latency;
7375     int next_buddy_marked = 0;
7376 
7377     if (unlikely(se == pse)) {
7378         return;
7379     }
7380 
7381     /*
7382      * This is possible from callers such as attach_tasks(), in which we
7383      * unconditionally check_preempt_curr() after an enqueue (which may have
7384      * led to a throttle).  This both saves work and prevents false
7385      * next-buddy nomination below.
7386      */
7387     if (unlikely(throttled_hierarchy(cfs_rq_of(pse)))) {
7388         return;
7389     }
7390 
7391     if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
7392         set_next_buddy(pse);
7393         next_buddy_marked = 1;
7394     }
7395 
7396     /*
7397      * We can come here with TIF_NEED_RESCHED already set from new task
7398      * wake up path.
7399      *
7400      * Note: this also catches the edge-case of curr being in a throttled
7401      * group (e.g. via set_curr_task), since update_curr() (in the
7402      * enqueue of curr) will have resulted in resched being set.  This
7403      * prevents us from potentially nominating it as a false LAST_BUDDY
7404      * below.
7405      */
7406     if (test_tsk_need_resched(curr)) {
7407         return;
7408     }
7409 
7410     /* Idle tasks are by definition preempted by non-idle tasks. */
7411     if (unlikely(task_has_idle_policy(curr)) && likely(!task_has_idle_policy(p))) {
7412         goto preempt;
7413     }
7414 
7415     /*
7416      * Batch and idle tasks do not preempt non-idle tasks (their preemption
7417      * is driven by the tick):
7418      */
7419     if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) {
7420         return;
7421     }
7422 
7423     find_matching_se(&se, &pse);
7424     update_curr(cfs_rq_of(se));
7425     BUG_ON(!pse);
7426     if (wakeup_preempt_entity(se, pse) == 1) {
7427         /*
7428          * Bias pick_next to pick the sched entity that is
7429          * triggering this preemption.
7430          */
7431         if (!next_buddy_marked) {
7432             set_next_buddy(pse);
7433         }
7434         goto preempt;
7435     }
7436 
7437     return;
7438 
7439 preempt:
7440     resched_curr(rq);
7441     /*
7442      * Only set the backward buddy when the current task is still
7443      * on the rq. This can happen when a wakeup gets interleaved
7444      * with schedule on the ->pre_schedule() or idle_balance()
7445      * point, either of which can drop the rq lock.
7446      *
7447      * Also, during early boot the idle thread is in the fair class,
7448      * for obvious reasons it's a bad idea to schedule back to it.
7449      */
7450     if (unlikely(!se->on_rq || curr == rq->idle)) {
7451         return;
7452     }
7453 
7454     if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) {
7455         set_last_buddy(se);
7456     }
7457 }
7458 
7459 struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
7460 {
7461     struct cfs_rq *cfs_rq = &rq->cfs;
7462     struct sched_entity *se;
7463     struct task_struct *p;
7464     int new_tasks;
7465 
7466     while (1) {
7467         if (!sched_fair_runnable(rq)) {
7468             goto idle;
7469         }
7470 
7471 #ifdef CONFIG_FAIR_GROUP_SCHED
7472         if (!prev || prev->sched_class != &fair_sched_class) {
7473             goto simple;
7474         }
7475 
7476         /*
7477          * Because of the set_next_buddy() in dequeue_task_fair() it is rather
7478          * likely that a next task is from the same cgroup as the current.
7479          *
7480          * Therefore attempt to avoid putting and setting the entire cgroup
7481          * hierarchy, only change the part that actually changes.
7482          */
7483 
7484         do {
7485             struct sched_entity *curr = cfs_rq->curr;
7486 
7487             /*
7488              * Since we got here without doing put_prev_entity() we also
7489              * have to consider cfs_rq->curr. If it is still a runnable
7490              * entity, update_curr() will update its vruntime, otherwise
7491              * forget we've ever seen it.
7492              */
7493             if (curr) {
7494                 if (curr->on_rq) {
7495                     update_curr(cfs_rq);
7496                 } else {
7497                     curr = NULL;
7498                 }
7499 
7500                 /*
7501                  * This call to check_cfs_rq_runtime() will do the
7502                  * throttle and dequeue its entity in the parent(s).
7503                  * Therefore the nr_running test will indeed
7504                  * be correct.
7505                  */
7506                 if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
7507                     cfs_rq = &rq->cfs;
7508 
7509                     if (!cfs_rq->nr_running) {
7510                         goto idle;
7511                     }
7512 
7513                     goto simple;
7514                 }
7515             }
7516 
7517             se = pick_next_entity(cfs_rq, curr);
7518             cfs_rq = group_cfs_rq(se);
7519         } while (cfs_rq);
7520 
7521         p = task_of(se);
7522         /*
7523          * Since we haven't yet done put_prev_entity and if the selected task
7524          * is a different task than we started out with, try and touch the
7525          * least amount of cfs_rqs.
7526          */
7527         if (prev != p) {
7528             struct sched_entity *pse = &prev->se;
7529 
7530             while (!(cfs_rq = is_same_group(se, pse))) {
7531                 int se_depth = se->depth;
7532                 int pse_depth = pse->depth;
7533 
7534                 if (se_depth <= pse_depth) {
7535                     put_prev_entity(cfs_rq_of(pse), pse);
7536                     pse = parent_entity(pse);
7537                 }
7538                 if (se_depth >= pse_depth) {
7539                     set_next_entity(cfs_rq_of(se), se);
7540                     se = parent_entity(se);
7541                 }
7542             }
7543 
7544             put_prev_entity(cfs_rq, pse);
7545             set_next_entity(cfs_rq, se);
7546         }
7547 
7548         goto done;
7549     simple:
7550 #endif
7551         if (prev) {
7552             put_prev_task(rq, prev);
7553         }
7554 
7555         do {
7556             se = pick_next_entity(cfs_rq, NULL);
7557             set_next_entity(cfs_rq, se);
7558             cfs_rq = group_cfs_rq(se);
7559         } while (cfs_rq);
7560 
7561         p = task_of(se);
7562 
7563     done:
7564         __maybe_unused;
7565 #ifdef CONFIG_SMP
7566         /*
7567          * Move the next running task to the front of
7568          * the list, so our cfs_tasks list becomes an
7569          * MRU one.
7570          */
7571         list_move(&p->se.group_node, &rq->cfs_tasks);
7572 #endif
7573 
7574         if (hrtick_enabled(rq)) {
7575             hrtick_start_fair(rq, p);
7576         }
7577 
7578         update_misfit_status(p, rq);
7579 
7580         return p;
7581 
7582     idle:
7583         if (!rf) {
7584             return NULL;
7585         }
7586 
7587         new_tasks = newidle_balance(rq, rf);
7588         /*
7589          * Because newidle_balance() releases (and re-acquires) rq->lock, it is
7590          * possible for any higher priority task to appear. In that case we
7591          * must re-start the pick_next_entity() loop.
7592          */
7593         if (new_tasks < 0) {
7594             return RETRY_TASK;
7595         }
7596 
7597         if (new_tasks > 0) {
7598             continue;
7599         }
7600         break;
7601     }
7602 
7603     /*
7604      * rq is about to be idle, check if we need to update the
7605      * lost_idle_time of clock_pelt
7606      */
7607     update_idle_rq_clock_pelt(rq);
7608 
7609     return NULL;
7610 }
7611 
7612 static struct task_struct *fair_pick_next_task_fair(struct rq *rq)
7613 {
7614     return pick_next_task_fair(rq, NULL, NULL);
7615 }
7616 
7617 /*
7618  * Account for a descheduled task
7619  */
7620 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
7621 {
7622     struct sched_entity *se = &prev->se;
7623     struct cfs_rq *cfs_rq;
7624 
7625     for_each_sched_entity(se) {
7626         cfs_rq = cfs_rq_of(se);
7627         put_prev_entity(cfs_rq, se);
7628     }
7629 }
7630 
7631 /*
7632  * sched_yield() is very simple
7633  *
7634  * The magic of dealing with the ->skip buddy is in pick_next_entity.
7635  */
7636 static void yield_task_fair(struct rq *rq)
7637 {
7638     struct task_struct *curr = rq->curr;
7639     struct cfs_rq *cfs_rq = task_cfs_rq(curr);
7640     struct sched_entity *se = &curr->se;
7641 
7642     /*
7643      * Are we the only task in the tree?
7644      */
7645     if (unlikely(rq->nr_running == 1)) {
7646         return;
7647     }
7648 
7649     clear_buddies(cfs_rq, se);
7650 
7651     if (curr->policy != SCHED_BATCH) {
7652         update_rq_clock(rq);
7653         /*
7654          * Update run-time statistics of the 'current'.
7655          */
7656         update_curr(cfs_rq);
7657         /*
7658          * Tell update_rq_clock() that we've just updated,
7659          * so we don't do microscopic update in schedule()
7660          * and double the fastpath cost.
7661          */
7662         rq_clock_skip_update(rq);
7663     }
7664 
7665     set_skip_buddy(se);
7666 }
7667 
7668 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
7669 {
7670     struct sched_entity *se = &p->se;
7671 
7672     /* throttled hierarchies are not runnable */
7673     if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) {
7674         return false;
7675     }
7676 
7677     /* Tell the scheduler that we'd really like pse to run next. */
7678     set_next_buddy(se);
7679 
7680     yield_task_fair(rq);
7681 
7682     return true;
7683 }
7684 
7685 #ifdef CONFIG_SMP
7686 /**************************************************
7687  * Fair scheduling class load-balancing methods.
7688  *
7689  * BASICS
7690  *
7691  * The purpose of load-balancing is to achieve the same basic fairness the
7692  * per-CPU scheduler provides, namely provide a proportional amount of compute
7693  * time to each task. This is expressed in the following equation:
7694  *
7695  *   W_i,n/C_i == W_j,n/C_j for all i,j                               (1)
7696  *
7697  * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight
7698  * W_i,0 is defined as:
7699  *
7700  *   W_i,0 = \Sum_j w_i,j                                             (2)
7701  *
7702  * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight
7703  * is derived from the nice value as per sched_prio_to_weight[].
7704  *
7705  * The weight average is an exponential decay average of the instantaneous
7706  * weight:
7707  *
7708  *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
7709  *
7710  * C_i is the compute capacity of CPU i, typically it is the
7711  * fraction of 'recent' time available for SCHED_OTHER task execution. But it
7712  * can also include other factors [XXX].
7713  *
7714  * To achieve this balance we define a measure of imbalance which follows
7715  * directly from (1):
7716  *
7717  *   imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j }    (4)
7718  *
7719  * We then move tasks around to minimize the imbalance. In the continuous
7720  * function space it is obvious this converges, in the discrete case we get
7721  * a few fun cases generally called infeasible weight scenarios.
7722  *
7723  * [XXX expand on:
7724  *     - infeasible weights;
7725  *     - local vs global optima in the discrete case. ]
7726  *
7727  *
7728  * SCHED DOMAINS
7729  *
7730  * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
7731  * for all i,j solution, we create a tree of CPUs that follows the hardware
7732  * topology where each level pairs two lower groups (or better). This results
7733  * in O(log n) layers. Furthermore we reduce the number of CPUs going up the
7734  * tree to only the first of the previous level and we decrease the frequency
7735  * of load-balance at each level inv. proportional to the number of CPUs in
7736  * the groups.
7737  *
7738  * This yields:
7739  *
7740  *     log_2 n     1     n
7741  *   \Sum       { --- * --- * 2^i } = O(n)                            (5)
7742  *     i = 0      2^i   2^i
7743  *                               `- size of each group
7744  *         |         |     `- number of CPUs doing load-balance
7745  *         |         `- freq
7746  *         `- sum over all levels
7747  *
7748  * Coupled with a limit on how many tasks we can migrate every balance pass,
7749  * this makes (5) the runtime complexity of the balancer.
7750  *
7751  * An important property here is that each CPU is still (indirectly) connected
7752  * to every other CPU in at most O(log n) steps:
7753  *
7754  * The adjacency matrix of the resulting graph is given by:
7755  *
7756  *             log_2 n
7757  *   A_i,j = \Union     (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)  (6)
7758  *             k = 0
7759  *
7760  * And you'll find that:
7761  *
7762  *   A^(log_2 n)_i,j != 0  for all i,j                                (7)
7763  *
7764  * Showing there's indeed a path between every CPU in at most O(log n) steps.
7765  * The task movement gives a factor of O(m), giving a convergence complexity
7766  * of:
7767  *
7768  *   O(nm log n),  n := nr_cpus, m := nr_tasks                        (8)
7769  *
7770  *
7771  * WORK CONSERVING
7772  *
7773  * In order to avoid CPUs going idle while there's still work to do, new idle
7774  * balancing is more aggressive and has the newly idle CPU iterate up the domain
7775  * tree itself instead of relying on other CPUs to bring it work.
7776  *
7777  * This adds some complexity to both (5) and (8) but it reduces the total idle
7778  * time.
7779  *
7780  * [XXX more?]
7781  *
7782  *
7783  * CGROUPS
7784  *
7785  * Cgroups make a horror show out of (2), instead of a simple sum we get:
7786  *
7787  *                                s_k,i
7788  *   W_i,0 = \Sum_j \Prod_k w_k * -----                               (9)
7789  *                                 S_k
7790  *
7791  * Where
7792  *
7793  *   s_k,i = \Sum_j w_i,j,k  and  S_k = \Sum_i s_k,i                 (10)
7794  *
7795  * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i.
7796  *
7797  * The big problem is S_k: it's a global sum needed to compute a local (W_i)
7798  * property.
7799  *
7800  * [XXX write more on how we solve this.. _after_ merging pjt's patches that
7801  *      rewrite all of this once again.]
7802  */
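
/*
 * Illustrative sketch (not compiled): equations (3) and (4) above spelled
 * out as plain integer arithmetic. The helpers below are hypothetical and
 * exist only to make the math concrete; the real balancer never computes
 * the averages this literally.
 */
#if 0
/* W'_i,n = (2^n - 1)/2^n * W_i,n + 1/2^n * W_i,0, see (3) */
static unsigned long weight_avg_decay(unsigned long w_avg, unsigned long w_inst, unsigned int n)
{
    return ((((1UL << n) - 1) * w_avg) >> n) + (w_inst >> n);
}

/* imb_i,j from (4), with both weights pre-divided by their CPU capacities */
static long lb_imbalance(unsigned long wc_i, unsigned long wc_j)
{
    unsigned long avg = (wc_i + wc_j) / 2;

    return (long)max(avg, wc_i) - (long)min(avg, wc_j);
}
#endif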
7803 
7804 static unsigned long __read_mostly max_load_balance_interval = HZ / 10;
7805 
7806 enum fbq_type { regular, remote, all };
7807 
7808 /*
7809  * 'group_type' describes the group of CPUs at the moment of load balancing.
7810  *
7811  * The enum is ordered by pulling priority, with the group with lowest priority
7812  * first so the group_type can simply be compared when selecting the busiest
7813  * group. See update_sd_pick_busiest().
7814  */
7815 enum group_type {
7816     /* The group has spare capacity that can be used to run more tasks.  */
7817     group_has_spare = 0,
7818     /*
7819      * The group is fully used and the tasks don't compete for more CPU
7820      * cycles. Nevertheless, some tasks might wait before running.
7821      */
7822     group_fully_busy,
7823     /*
7824      * SD_ASYM_CPUCAPACITY only: One task doesn't fit with CPU's capacity
7825      * and must be migrated to a more powerful CPU.
7826      */
7827     group_misfit_task,
7828     /*
7829      * SD_ASYM_PACKING only: One local CPU with higher capacity is available,
7830      * and the task should be migrated to it instead of running on the
7831      * current CPU.
7832      */
7833     group_asym_packing,
7834     /*
7835      * The tasks' affinity constraints previously prevented the scheduler
7836      * from balancing the load across the system.
7837      */
7838     group_imbalanced,
7839     /*
7840      * The CPU is overloaded and can't provide expected CPU cycles to all
7841      * tasks.
7842      */
7843     group_overloaded
7844 };
7845 
7846 enum migration_type { migrate_load = 0, migrate_util, migrate_task, migrate_misfit };
7847 
7848 #define LBF_ALL_PINNED 0x01
7849 #define LBF_NEED_BREAK 0x02
7850 #define LBF_DST_PINNED 0x04
7851 #define LBF_SOME_PINNED 0x08
7852 #define LBF_NOHZ_STATS 0x10
7853 #define LBF_NOHZ_AGAIN 0x20
7854 #define LBF_IGNORE_PREFERRED_CLUSTER_TASKS 0x200
7855 
7856 struct lb_env {
7857     struct sched_domain *sd;
7858 
7859     struct rq *src_rq;
7860     int src_cpu;
7861 
7862     int dst_cpu;
7863     struct rq *dst_rq;
7864 
7865     struct cpumask *dst_grpmask;
7866     int new_dst_cpu;
7867     enum cpu_idle_type idle;
7868     long imbalance;
7869     /* The set of CPUs under consideration for load-balancing */
7870     struct cpumask *cpus;
7871 
7872     unsigned int flags;
7873 
7874     unsigned int loop;
7875     unsigned int loop_break;
7876     unsigned int loop_max;
7877 
7878     enum fbq_type fbq_type;
7879     enum migration_type migration_type;
7880     struct list_head tasks;
7881 };
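
/*
 * A minimal sketch (not compiled) of how a balance pass might fill in
 * struct lb_env before scanning the busiest rq. The field values are
 * illustrative; the authoritative setup lives in load_balance().
 */
#if 0
    struct lb_env env = {
        .sd = sd,
        .dst_cpu = this_cpu,
        .dst_rq = this_rq,
        .dst_grpmask = sched_group_span(sd->groups),
        .idle = idle,
        .loop_break = sched_nr_migrate_break,
        .cpus = cpus,
        .fbq_type = all,
        .tasks = LIST_HEAD_INIT(env.tasks),
    };
#endif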
7882 
7883 /*
7884  * Is this task likely cache-hot:
7885  */
7886 static int task_hot(struct task_struct *p, struct lb_env *env)
7887 {
7888     s64 delta;
7889 
7890     lockdep_assert_held(&env->src_rq->lock);
7891 
7892     if (p->sched_class != &fair_sched_class) {
7893         return 0;
7894     }
7895 
7896     if (unlikely(task_has_idle_policy(p))) {
7897         return 0;
7898     }
7899 
7900     /* SMT siblings share cache */
7901     if (env->sd->flags & SD_SHARE_CPUCAPACITY) {
7902         return 0;
7903     }
7904 
7905     /*
7906      * Buddy candidates are cache hot:
7907      */
7908     if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
7909         (&p->se == cfs_rq_of(&p->se)->next || &p->se == cfs_rq_of(&p->se)->last)) {
7910         return 1;
7911     }
7912 
7913     if (sysctl_sched_migration_cost == -1) {
7914         return 1;
7915     }
7916     if (sysctl_sched_migration_cost == 0) {
7917         return 0;
7918     }
7919 
7920     delta = rq_clock_task(env->src_rq) - p->se.exec_start;
7921 
7922     return delta < (s64)sysctl_sched_migration_cost;
7923 }
7924 
7925 #ifdef CONFIG_NUMA_BALANCING
7926 /*
7927  * Returns 1 if task migration degrades locality.
7928  * Returns 0 if task migration improves locality, i.e. migration is preferred.
7929  * Returns -1 if task migration is not affected by locality.
7930  */
7931 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
7932 {
7933     struct numa_group *numa_group = rcu_dereference(p->numa_group);
7934     unsigned long src_weight, dst_weight;
7935     int src_nid, dst_nid, dist;
7936 
7937     if (!static_branch_likely(&sched_numa_balancing)) {
7938         return -1;
7939     }
7940 
7941     if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) {
7942         return -1;
7943     }
7944 
7945     src_nid = cpu_to_node(env->src_cpu);
7946     dst_nid = cpu_to_node(env->dst_cpu);
7947     if (src_nid == dst_nid) {
7948         return -1;
7949     }
7950 
7951     /* Migrating away from the preferred node is always bad. */
7952     if (src_nid == p->numa_preferred_nid) {
7953         if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) {
7954             return 1;
7955         } else {
7956             return -1;
7957         }
7958     }
7959 
7960     /* Encourage migration to the preferred node. */
7961     if (dst_nid == p->numa_preferred_nid) {
7962         return 0;
7963     }
7964 
7965     /* Leaving a core idle is often worse than degrading locality. */
7966     if (env->idle == CPU_IDLE) {
7967         return -1;
7968     }
7969 
7970     dist = node_distance(src_nid, dst_nid);
7971     if (numa_group) {
7972         src_weight = group_weight(p, src_nid, dist);
7973         dst_weight = group_weight(p, dst_nid, dist);
7974     } else {
7975         src_weight = task_weight(p, src_nid, dist);
7976         dst_weight = task_weight(p, dst_nid, dist);
7977     }
7978 
7979     return dst_weight < src_weight;
7980 }
7981 
7982 #else
7983 static inline int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
7984 {
7985     return -1;
7986 }
7987 #endif
7988 
7989 /*
7990  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
7991  */
7992 static int can_migrate_task(struct task_struct *p, struct lb_env *env)
7993 {
7994     int tsk_cache_hot;
7995 
7996     lockdep_assert_held(&env->src_rq->lock);
7997 
7998     /*
7999      * We do not migrate tasks that:
8000      * 1) are throttled (see throttled_lb_pair()), or
8001      * 2) cannot be migrated to this CPU due to cpus_ptr, or
8002      * 3) are running (obviously), or
8003      * 4) are cache-hot on their current CPU.
8004      */
8005     if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) {
8006         return 0;
8007     }
8008 
8009     /* Disregard pcpu kthreads; they are where they need to be. */
8010     if (kthread_is_per_cpu(p)) {
8011         return 0;
8012     }
8013 
8014     if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
8015         int cpu;
8016 
8017         schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
8018 
8019         env->flags |= LBF_SOME_PINNED;
8020 
8021         /*
8022          * Remember if this task can be migrated to any other CPU in
8023          * our sched_group. We may want to revisit it if we couldn't
8024          * meet load balance goals by pulling other tasks on src_cpu.
8025          *
8026          * Avoid computing new_dst_cpu for NEWLY_IDLE or if we have
8027          * already computed one in current iteration.
8028          */
8029         if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED)) {
8030             return 0;
8031         }
8032 
8033         /* Prevent re-selecting dst_cpu via env's CPUs: */
8034         for_each_cpu_and(cpu, env->dst_grpmask, env->cpus)
8035         {
8036             if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
8037                 env->flags |= LBF_DST_PINNED;
8038                 env->new_dst_cpu = cpu;
8039                 break;
8040             }
8041         }
8042 
8043         return 0;
8044     }
8045 
8046     /* Record that we found at least one task that could run on dst_cpu */
8047     env->flags &= ~LBF_ALL_PINNED;
8048 
8049 #ifdef CONFIG_SCHED_RTG
8050     if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS && !preferred_cluster(cpu_rq(env->dst_cpu)->cluster, p)) {
8051         return 0;
8052     }
8053 #endif
8054 
8055     if (task_running(env->src_rq, p)) {
8056         schedstat_inc(p->se.statistics.nr_failed_migrations_running);
8057         return 0;
8058     }
8059 
8060     /*
8061      * Aggressive migration if:
8062      * 1) destination numa is preferred, or
8063      * 2) task is cache cold, or
8064      * 3) too many balance attempts have failed.
8065      */
8066     tsk_cache_hot = migrate_degrades_locality(p, env);
8067     if (tsk_cache_hot == -1) {
8068         tsk_cache_hot = task_hot(p, env);
8069     }
8070 
8071     if (tsk_cache_hot <= 0 || env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
8072         if (tsk_cache_hot == 1) {
8073             schedstat_inc(env->sd->lb_hot_gained[env->idle]);
8074             schedstat_inc(p->se.statistics.nr_forced_migrations);
8075         }
8076         return 1;
8077     }
8078 
8079     schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
8080     return 0;
8081 }
8082 
8083 /*
8084  * detach_task() -- detach the task for the migration specified in env
8085  */
8086 static void detach_task(struct task_struct *p, struct lb_env *env)
8087 {
8088     lockdep_assert_held(&env->src_rq->lock);
8089 
8090     deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
8091 #ifdef CONFIG_SCHED_WALT
8092     double_lock_balance(env->src_rq, env->dst_rq);
8093     if (!(env->src_rq->clock_update_flags & RQCF_UPDATED)) {
8094         update_rq_clock(env->src_rq);
8095     }
8096 #endif
8097     set_task_cpu(p, env->dst_cpu);
8098 #ifdef CONFIG_SCHED_WALT
8099     double_unlock_balance(env->src_rq, env->dst_rq);
8100 #endif
8101 }
8102 
8103 /*
8104  * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
8105  * part of active balancing operations within "domain".
8106  *
8107  * Returns a task if successful and NULL otherwise.
8108  */
8109 static struct task_struct *detach_one_task(struct lb_env *env)
8110 {
8111     struct task_struct *p;
8112 
8113     lockdep_assert_held(&env->src_rq->lock);
8114 
8115     list_for_each_entry_reverse(p, &env->src_rq->cfs_tasks, se.group_node)
8116     {
8117         if (!can_migrate_task(p, env)) {
8118             continue;
8119         }
8120 
8121         detach_task(p, env);
8122 
8123         /*
8124          * Right now, this is only the second place where
8125          * lb_gained[env->idle] is updated (the other is detach_tasks())
8126          * so we can safely collect stats here rather than
8127          * inside detach_tasks().
8128          */
8129         schedstat_inc(env->sd->lb_gained[env->idle]);
8130         return p;
8131     }
8132     return NULL;
8133 }
8134 
8135 static const unsigned int sched_nr_migrate_break = 32;
8136 
8137 /*
8138  * detach_tasks() -- tries to detach up to imbalance load/util/tasks from
8139  * busiest_rq, as part of a balancing operation within domain "sd".
8140  *
8141  * Returns number of detached tasks if successful and 0 otherwise.
8142  */
8143 static int detach_tasks(struct lb_env *env)
8144 {
8145     struct list_head *tasks = &env->src_rq->cfs_tasks;
8146     unsigned long util, load;
8147     struct task_struct *p;
8148     int detached = 0;
8149 #ifdef CONFIG_SCHED_RTG
8150     int orig_loop = env->loop;
8151 #endif
8152 
8153     lockdep_assert_held(&env->src_rq->lock);
8154 
8155     if (env->imbalance <= 0) {
8156         return 0;
8157     }
8158 
8159 #ifdef CONFIG_SCHED_RTG
8160     if (!same_cluster(env->dst_cpu, env->src_cpu)) {
8161         env->flags |= LBF_IGNORE_PREFERRED_CLUSTER_TASKS;
8162     }
8163 
8164 redo:
8165 #endif
8166     while (!list_empty(tasks)) {
8167         /*
8168          * We don't want to steal all, otherwise we may be treated likewise,
8169          * which could at worst lead to a livelock crash.
8170          */
8171         if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) {
8172             break;
8173         }
8174 
8175         p = list_last_entry(tasks, struct task_struct, se.group_node);
8176 
8177         env->loop++;
8178         /* We've more or less seen every task there is, call it quits */
8179         if (env->loop > env->loop_max) {
8180             break;
8181         }
8182 
8183         /* take a breather every nr_migrate tasks */
8184         if (env->loop > env->loop_break) {
8185             env->loop_break += sched_nr_migrate_break;
8186             env->flags |= LBF_NEED_BREAK;
8187             break;
8188         }
8189 
8190         if (!can_migrate_task(p, env)) {
8191             goto next;
8192         }
8193 
8194         switch (env->migration_type) {
8195             case migrate_load:
8196                 /*
8197                  * Depending on the number of CPUs and tasks and the
8198                  * cgroup hierarchy, task_h_load() can return a null
8199                  * value. Make sure that env->imbalance decreases
8200                  * otherwise detach_tasks() will stop only after
8201                  * detaching up to loop_max tasks.
8202                  */
8203                 load = max_t(unsigned long, task_h_load(p), 1);
8204 
8205                 if (sched_feat(LB_MIN) && load < 0x10 && !env->sd->nr_balance_failed) {
8206                     goto next;
8207                 }
8208 
8209                 /*
8210                  * Make sure that we don't migrate too much load.
8211                  * Nevertheless, let's relax the constraint if the
8212                  * scheduler fails to find a good waiting task to
8213                  * migrate.
8214                  */
8215                 if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance) {
8216                     goto next;
8217                 }
8218 
8219                 env->imbalance -= load;
8220                 break;
8221 
8222             case migrate_util:
8223                 util = task_util_est(p);
8224                 if (util > env->imbalance) {
8225                     goto next;
8226                 }
8227 
8228                 env->imbalance -= util;
8229                 break;
8230 
8231             case migrate_task:
8232                 env->imbalance--;
8233                 break;
8234 
8235             case migrate_misfit:
8236                 /* This is not a misfit task */
8237                 if (task_fits_capacity(p, capacity_of(env->src_cpu))) {
8238                     goto next;
8239                 }
8240 
8241                 env->imbalance = 0;
8242                 break;
8243         }
8244 
8245         detach_task(p, env);
8246         list_add(&p->se.group_node, &env->tasks);
8247 
8248         detached++;
8249 
8250 #ifdef CONFIG_PREEMPTION
8251         /*
8252          * NEWIDLE balancing is a source of latency, so preemptible
8253          * kernels will stop after the first task is detached to minimize
8254          * the critical section.
8255          */
8256         if (env->idle == CPU_NEWLY_IDLE) {
8257             break;
8258         }
8259 #endif
8260 
8261         /*
8262          * We only want to steal up to the prescribed amount of
8263          * load/util/tasks.
8264          */
8265         if (env->imbalance <= 0) {
8266             break;
8267         }
8268 
8269         continue;
8270     next:
8271         list_move(&p->se.group_node, tasks);
8272     }
8273 
8274 #ifdef CONFIG_SCHED_RTG
8275     if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS && !detached) {
8276         tasks = &env->src_rq->cfs_tasks;
8277         env->flags &= ~LBF_IGNORE_PREFERRED_CLUSTER_TASKS;
8278         env->loop = orig_loop;
8279         goto redo;
8280     }
8281 #endif
8282 
8283     /*
8284      * Right now, this is one of only two places we collect this stat
8285      * so we can safely collect detach_one_task() stats here rather
8286      * than inside detach_one_task().
8287      */
8288     schedstat_add(env->sd->lb_gained[env->idle], detached);
8289 
8290     return detached;
8291 }
8292 
8293 /*
8294  * attach_task() -- attach the task detached by detach_task() to its new rq.
8295  */
8296 static void attach_task(struct rq *rq, struct task_struct *p)
8297 {
8298     lockdep_assert_held(&rq->lock);
8299 
8300     BUG_ON(task_rq(p) != rq);
8301     activate_task(rq, p, ENQUEUE_NOCLOCK);
8302     check_preempt_curr(rq, p, 0);
8303 }
8304 
8305 /*
8306  * attach_one_task() -- attaches the task returned from detach_one_task() to
8307  * its new rq.
8308  */
8309 static void attach_one_task(struct rq *rq, struct task_struct *p)
8310 {
8311     struct rq_flags rf;
8312 
8313     rq_lock(rq, &rf);
8314     update_rq_clock(rq);
8315     attach_task(rq, p);
8316     rq_unlock(rq, &rf);
8317 }
8318 
8319 /*
8320  * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
8321  * new rq.
8322  */
8323 static void attach_tasks(struct lb_env *env)
8324 {
8325     struct list_head *tasks = &env->tasks;
8326     struct task_struct *p;
8327     struct rq_flags rf;
8328 
8329     rq_lock(env->dst_rq, &rf);
8330     update_rq_clock(env->dst_rq);
8331 
8332     while (!list_empty(tasks)) {
8333         p = list_first_entry(tasks, struct task_struct, se.group_node);
8334         list_del_init(&p->se.group_node);
8335 
8336         attach_task(env->dst_rq, p);
8337     }
8338 
8339     rq_unlock(env->dst_rq, &rf);
8340 }
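
/*
 * Condensed sketch (not compiled) of how a balance pass pairs the two
 * halves above: tasks are detached under the busiest rq's lock, the lock
 * is dropped, and the tasks are re-attached under the destination rq's
 * lock, with LBF_NEED_BREAK restarting the scan to keep the critical
 * section short. The authoritative loop lives in load_balance().
 */
#if 0
more_balance:
    rq_lock_irqsave(busiest, &rf);
    update_rq_clock(busiest);
    cur_ld_moved = detach_tasks(&env);
    rq_unlock(busiest, &rf);

    if (cur_ld_moved) {
        attach_tasks(&env); /* takes env.dst_rq->lock itself */
        ld_moved += cur_ld_moved;
    }
    local_irq_restore(rf.flags);

    if (env.flags & LBF_NEED_BREAK) {
        env.flags &= ~LBF_NEED_BREAK;
        goto more_balance;
    }
#endif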
8341 
8342 #ifdef CONFIG_NO_HZ_COMMON
8343 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
8344 {
8345     if (cfs_rq->avg.load_avg) {
8346         return true;
8347     }
8348 
8349     if (cfs_rq->avg.util_avg) {
8350         return true;
8351     }
8352 
8353     return false;
8354 }
8355 
8356 static inline bool others_have_blocked(struct rq *rq)
8357 {
8358     if (READ_ONCE(rq->avg_rt.util_avg)) {
8359         return true;
8360     }
8361 
8362     if (READ_ONCE(rq->avg_dl.util_avg)) {
8363         return true;
8364     }
8365 
8366     if (thermal_load_avg(rq)) {
8367         return true;
8368     }
8369 
8370 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
8371     if (READ_ONCE(rq->avg_irq.util_avg)) {
8372         return true;
8373     }
8374 #endif
8375 
8376     return false;
8377 }
8378 
8379 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
8380 {
8381     rq->last_blocked_load_update_tick = jiffies;
8382 
8383     if (!has_blocked) {
8384         rq->has_blocked_load = 0;
8385     }
8386 }
8387 #else
8388 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
8389 {
8390     return false;
8391 }
8392 static inline bool others_have_blocked(struct rq *rq)
8393 {
8394     return false;
8395 }
8396 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
8397 {
8398 }
8399 #endif
8400 
8401 static bool fair_update_blocked_others(struct rq *rq, bool *done)
8402 {
8403     const struct sched_class *curr_class;
8404     u64 now = rq_clock_pelt(rq);
8405     unsigned long thermal_pressure;
8406     bool decayed;
8407 
8408     /*
8409      * update_load_avg() can call cpufreq_update_util(). Make sure that RT,
8410      * DL and IRQ signals have been updated before updating CFS.
8411      */
8412     curr_class = rq->curr->sched_class;
8413 
8414     thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
8415 
8416     decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
8417               update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
8418               update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) | update_irq_load_avg(rq, 0);
8419 
8420     if (others_have_blocked(rq)) {
8421         *done = false;
8422     }
8423 
8424     return decayed;
8425 }
8426 
8427 #ifdef CONFIG_FAIR_GROUP_SCHED
8428 
8429 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
8430 {
8431     if (cfs_rq->load.weight) {
8432         return false;
8433     }
8434 
8435     if (cfs_rq->avg.load_sum) {
8436         return false;
8437     }
8438 
8439     if (cfs_rq->avg.util_sum) {
8440         return false;
8441     }
8442 
8443     if (cfs_rq->avg.runnable_sum) {
8444         return false;
8445     }
8446 
8447     return true;
8448 }
8449 
8450 static bool fair_update_blocked_fair(struct rq *rq, bool *done)
8451 {
8452     struct cfs_rq *cfs_rq, *pos;
8453     bool decayed = false;
8454     int cpu = cpu_of(rq);
8455 
8456     /*
8457      * Iterates the task_group tree in a bottom up fashion, see
8458      * list_add_leaf_cfs_rq() for details.
8459      */
8460     for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
8461         struct sched_entity *se;
8462 
8463         if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
8464             update_tg_load_avg(cfs_rq);
8465 
8466             if (cfs_rq == &rq->cfs) {
8467                 decayed = true;
8468             }
8469         }
8470 
8471         /* Propagate pending load changes to the parent, if any: */
8472         se = cfs_rq->tg->se[cpu];
8473         if (se && !skip_blocked_update(se)) {
8474             update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
8475         }
8476 
8477         /*
8478          * There can be a lot of idle CPU cgroups.  Don't let fully
8479          * decayed cfs_rqs linger on the list.
8480          */
8481         if (cfs_rq_is_decayed(cfs_rq)) {
8482             list_del_leaf_cfs_rq(cfs_rq);
8483         }
8484 
8485         /* Don't need periodic decay once load/util_avg are null */
8486         if (cfs_rq_has_blocked(cfs_rq)) {
8487             *done = false;
8488         }
8489     }
8490 
8491     return decayed;
8492 }
8493 
8494 /*
8495  * Compute the hierarchical load factor for cfs_rq and all its ascendants.
8496  * This needs to be done in a top-down fashion because the load of a child
8497  * group is a fraction of its parents load.
8498  */
8499 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
8500 {
8501     struct rq *rq = rq_of(cfs_rq);
8502     struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
8503     unsigned long now = jiffies;
8504     unsigned long load;
8505 
8506     if (cfs_rq->last_h_load_update == now) {
8507         return;
8508     }
8509 
8510     WRITE_ONCE(cfs_rq->h_load_next, NULL);
8511     for_each_sched_entity(se) {
8512         cfs_rq = cfs_rq_of(se);
8513         WRITE_ONCE(cfs_rq->h_load_next, se);
8514         if (cfs_rq->last_h_load_update == now) {
8515             break;
8516         }
8517     }
8518 
8519     if (!se) {
8520         cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
8521         cfs_rq->last_h_load_update = now;
8522     }
8523 
8524     while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
8525         load = cfs_rq->h_load;
8526         load = div64_ul(load * se->avg.load_avg, cfs_rq_load_avg(cfs_rq) + 1);
8527         cfs_rq = group_cfs_rq(se);
8528         cfs_rq->h_load = load;
8529         cfs_rq->last_h_load_update = now;
8530     }
8531 }
8532 
8533 static unsigned long task_h_load(struct task_struct *p)
8534 {
8535     struct cfs_rq *cfs_rq = task_cfs_rq(p);
8536 
8537     update_cfs_rq_h_load(cfs_rq);
8538     return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, cfs_rq_load_avg(cfs_rq) + 1);
8539 }
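
/*
 * Worked example with illustrative numbers: a task with se.avg.load_avg =
 * 512 on a child cfs_rq whose h_load is 256 and whose cfs_rq_load_avg() is
 * 1024 gets task_h_load() = 512 * 256 / (1024 + 1) = 127, i.e. about half
 * of the group's h_load, as expected for a task contributing half of the
 * group's load.
 */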
8540 #else
8541 static bool fair_update_blocked_fair(struct rq *rq, bool *done)
8542 {
8543     struct cfs_rq *cfs_rq = &rq->cfs;
8544     bool decayed;
8545 
8546     decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
8547     if (cfs_rq_has_blocked(cfs_rq)) {
8548         *done = false;
8549     }
8550 
8551     return decayed;
8552 }
8553 
8554 static unsigned long task_h_load(struct task_struct *p)
8555 {
8556     return p->se.avg.load_avg;
8557 }
8558 #endif
8559 
8560 static void update_blocked_averages(int cpu)
8561 {
8562     bool decayed = false, done = true;
8563     struct rq *rq = cpu_rq(cpu);
8564     struct rq_flags rf;
8565 
8566     rq_lock_irqsave(rq, &rf);
8567     update_rq_clock(rq);
8568 
8569     decayed |= fair_update_blocked_others(rq, &done);
8570     decayed |= fair_update_blocked_fair(rq, &done);
8571 
8572     update_blocked_load_status(rq, !done);
8573     if (decayed) {
8574         cpufreq_update_util(rq, 0);
8575     }
8576     rq_unlock_irqrestore(rq, &rf);
8577 }
8578 
8579 /********** Helpers for find_busiest_group ************************/
8580 
8581 /*
8582  * sg_lb_stats - stats of a sched_group required for load_balancing
8583  */
8584 struct sg_lb_stats {
8585     unsigned long avg_load;   /* Avg load across the CPUs of the group */
8586     unsigned long group_load; /* Total load over the CPUs of the group */
8587     unsigned long group_capacity;
8588     unsigned long group_util;      /* Total utilization over the CPUs of the group */
8589     unsigned long group_runnable;  /* Total runnable time over the CPUs of the group */
8590     unsigned int sum_nr_running;   /* Nr of tasks running in the group */
8591     unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
8592     unsigned int idle_cpus;
8593     unsigned int group_weight;
8594     enum group_type group_type;
8595     unsigned int group_asym_packing;      /* Tasks should be moved to preferred CPU */
8596     unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
8597 #ifdef CONFIG_NUMA_BALANCING
8598     unsigned int nr_numa_running;
8599     unsigned int nr_preferred_running;
8600 #endif
8601 };
8602 
8603 /*
8604  * sd_lb_stats - Structure to store the statistics of a sched_domain
8605  *         during load balancing.
8606  */
8607 struct sd_lb_stats {
8608     struct sched_group *busiest;  /* Busiest group in this sd */
8609     struct sched_group *local;    /* Local group in this sd */
8610     unsigned long total_load;     /* Total load of all groups in sd */
8611     unsigned long total_capacity; /* Total capacity of all groups in sd */
8612     unsigned long avg_load;       /* Average load across all groups in sd */
8613     unsigned int prefer_sibling;  /* tasks should go to sibling first */
8614 
8615     struct sg_lb_stats busiest_stat; /* Statistics of the busiest group */
8616     struct sg_lb_stats local_stat;   /* Statistics of the local group */
8617 };
8618 
8619 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
8620 {
8621     /*
8622      * Skimp on the clearing to avoid duplicate work. We can avoid clearing
8623      * local_stat because update_sg_lb_stats() does a full clear/assignment.
8624      * We must however set busiest_stat::group_type and
8625      * busiest_stat::idle_cpus to the worst busiest group because
8626      * update_sd_pick_busiest() reads these before assignment.
8627      */
8628     *sds = (struct sd_lb_stats) {
8629         .busiest = NULL,
8630         .local = NULL,
8631         .total_load = 0UL,
8632         .total_capacity = 0UL,
8633         .busiest_stat =
8634             {
8635                 .idle_cpus = UINT_MAX,
8636                 .group_type = group_has_spare,
8637             },
8638     };
8639 }
8640 
8641 static unsigned long scale_rt_capacity(int cpu)
8642 {
8643     struct rq *rq = cpu_rq(cpu);
8644     unsigned long max = arch_scale_cpu_capacity(cpu);
8645     unsigned long used, free;
8646     unsigned long irq;
8647 
8648     irq = cpu_util_irq(rq);
8649     if (unlikely(irq >= max)) {
8650         return 1;
8651     }
8652 
8653     /*
8654      * avg_rt.util_avg and avg_dl.util_avg track binary signals
8655      * (running and not running) with weights 0 and 1024 respectively.
8656      * avg_thermal.load_avg tracks thermal pressure and the weighted
8657      * average uses the actual delta max capacity (load).
8658      */
8659     used = READ_ONCE(rq->avg_rt.util_avg);
8660     used += READ_ONCE(rq->avg_dl.util_avg);
8661     used += thermal_load_avg(rq);
8662     if (unlikely(used >= max)) {
8663         return 1;
8664     }
8665 
8666     free = max - used;
8667 
8668     return scale_irq_capacity(free, irq, max);
8669 }
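
/*
 * Worked example with illustrative numbers: for max = 1024 with rt util
 * 100, dl util 50 and thermal pressure 74, used = 224 and free = 800.
 * With irq = 128, scale_irq_capacity() then leaves
 * 800 * (1024 - 128) / 1024 = 700 of capacity for CFS tasks.
 */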
8670 
8671 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
8672 {
8673     unsigned long capacity = scale_rt_capacity(cpu);
8674     struct sched_group *sdg = sd->groups;
8675 
8676     cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
8677 
8678     if (!capacity) {
8679         capacity = 1;
8680     }
8681 
8682     cpu_rq(cpu)->cpu_capacity = capacity;
8683     trace_sched_cpu_capacity_tp(cpu_rq(cpu));
8684 
8685     sdg->sgc->capacity = capacity;
8686     sdg->sgc->min_capacity = capacity;
8687     sdg->sgc->max_capacity = capacity;
8688 }
8689 
8690 void update_group_capacity(struct sched_domain *sd, int cpu)
8691 {
8692     struct sched_domain *child = sd->child;
8693     struct sched_group *group, *sdg = sd->groups;
8694     unsigned long capacity, min_capacity, max_capacity;
8695     unsigned long interval;
8696 
8697     interval = msecs_to_jiffies(sd->balance_interval);
8698     interval = clamp(interval, 1UL, max_load_balance_interval);
8699     sdg->sgc->next_update = jiffies + interval;
8700 
8701     if (!child) {
8702         update_cpu_capacity(sd, cpu);
8703         return;
8704     }
8705 
8706     capacity = 0;
8707     min_capacity = ULONG_MAX;
8708     max_capacity = 0;
8709 
8710     if (child->flags & SD_OVERLAP) {
8711         /*
8712          * SD_OVERLAP domains cannot assume that child groups
8713          * span the current group.
8714          */
8715 
8716         for_each_cpu(cpu, sched_group_span(sdg))
8717         {
8718             unsigned long cpu_cap = capacity_of(cpu);
8719 
8720             if (cpu_isolated(cpu)) {
8721                 continue;
8722             }
8723 
8724             capacity += cpu_cap;
8725             min_capacity = min(cpu_cap, min_capacity);
8726             max_capacity = max(cpu_cap, max_capacity);
8727         }
8728     } else {
8729         /*
8730          * !SD_OVERLAP domains can assume that child groups
8731          * span the current group.
8732          */
8733 
8734         group = child->groups;
8735         do {
8736             struct sched_group_capacity *sgc = group->sgc;
8737             __maybe_unused cpumask_t *cpus = sched_group_span(group);
8738 
8739             if (!cpu_isolated(cpumask_first(cpus))) {
8740                 capacity += sgc->capacity;
8741                 min_capacity = min(sgc->min_capacity, min_capacity);
8742                 max_capacity = max(sgc->max_capacity, max_capacity);
8743             }
8744             group = group->next;
8745         } while (group != child->groups);
8746     }
8747 
8748     sdg->sgc->capacity = capacity;
8749     sdg->sgc->min_capacity = min_capacity;
8750     sdg->sgc->max_capacity = max_capacity;
8751 }
8752 
8753 /*
8754  * Check whether the capacity of the rq has been noticeably reduced by side
8755  * activity. The imbalance_pct is used for the threshold.
8756  * Return true if the capacity is reduced.
8757  */
8758 static inline int check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
8759 {
8760     return ((rq->cpu_capacity * sd->imbalance_pct) < (rq->cpu_capacity_orig * FAIR_ONEHUNDRED));
8761 }
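
/*
 * Example: with imbalance_pct = 117 (a typical cache-sharing level) and
 * cpu_capacity_orig = 1024, the rq is flagged once cpu_capacity * 117 <
 * 1024 * 100, i.e. once cpu_capacity has dropped to 875 or below.
 */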
8762 
8763 /*
8764  * Check whether a rq has a misfit task and if it looks like we can actually
8765  * help that task: we can migrate the task to a CPU of higher capacity, or
8766  * the task's current CPU is heavily pressured.
8767  */
8768 static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
8769 {
8770     return rq->misfit_task_load && (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity || check_cpu_capacity(rq, sd));
8771 }
8772 
8773 /*
8774  * Group imbalance indicates (and tries to solve) the problem where balancing
8775  * groups is inadequate due to ->cpus_ptr constraints.
8776  *
8777  * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
8778  * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
8779  * Something like
8780  *
8781  *    { 0 1 2 3 } { 4 5 6 7 }
8782  *            *     * * *
8783  *
8784  * If we were to balance group-wise we'd place two tasks in the first group and
8785  * two tasks in the second group. Clearly this is undesired as it will overload
8786  * cpu 3 and leave one of the CPUs in the second group unused.
8787  *
8788  * The current solution to this issue is detecting the skew in the first group
8789  * by noticing the lower domain failed to reach balance and had difficulty
8790  * moving tasks due to affinity constraints.
8791  *
8792  * When this is so detected; this group becomes a candidate for busiest; see
8793  * update_sd_pick_busiest(). And calculate_imbalance() and
8794  * find_busiest_group() avoid some of the usual balance conditions to allow it
8795  * to create an effective group imbalance.
8796  *
8797  * This is a somewhat tricky proposition since the next run might not find the
8798  * group imbalance and decide the groups need to be balanced again. A most
8799  * subtle and fragile situation.
8800  */
8801 
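/*
 * Condensed sketch (not compiled) of how the flag is set: when a
 * child-level balance pass saw pinned tasks it could not move
 * (LBF_SOME_PINNED) while an imbalance remained, it marks its parent's
 * local group so that the next parent-level pass treats the group as
 * imbalanced. The authoritative code lives in load_balance().
 */
#if 0
    if (sd_parent) {
        int *group_imbalance = &sd_parent->groups->sgc->imbalance;

        if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
            *group_imbalance = 1;
    }
#endif
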
8802 static inline int sg_imbalanced(struct sched_group *group)
8803 {
8804     return group->sgc->imbalance;
8805 }
8806 
8807 /*
8808  * group_has_capacity returns true if the group has spare capacity that could
8809  * be used by some tasks.
8810  * We consider that a group has spare capacity if the number of tasks is
8811  * smaller than the number of CPUs or if the utilization is lower than the
8812  * available capacity for CFS tasks.
8813  * For the latter, we use a threshold to stabilize the state, to take into
8814  * account the variance of the tasks' load and to return true if the available
8815  * capacity is meaningful for the load balancer.
8816  * As an example, an available capacity of 1% can appear but provides no
8817  * benefit for load balancing.
8818  */
8819 static inline bool group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
8820 {
8821     if (sgs->sum_nr_running < sgs->group_weight) {
8822         return true;
8823     }
8824 
8825     if ((sgs->group_capacity * imbalance_pct) < (sgs->group_runnable * FAIR_ONEHUNDRED)) {
8826         return false;
8827     }
8828 
8829     if ((sgs->group_capacity * FAIR_ONEHUNDRED) > (sgs->group_util * imbalance_pct)) {
8830         return true;
8831     }
8832 
8833     return false;
8834 }
8835 
8836 /*
8837  *  group_is_overloaded returns true if the group has more tasks than it can
8838  *  handle.
8839  *  group_is_overloaded is not equal to !group_has_capacity because a group
8840  *  with exactly the right number of tasks has no spare capacity left but is not
8841  *  overloaded so both group_has_capacity and group_is_overloaded return
8842  *  false.
8843  */
8844 static inline bool group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
8845 {
8846     if (sgs->sum_nr_running <= sgs->group_weight) {
8847         return false;
8848     }
8849 
8850     if ((sgs->group_capacity * FAIR_ONEHUNDRED) < (sgs->group_util * imbalance_pct)) {
8851         return true;
8852     }
8853 
8854     if ((sgs->group_capacity * imbalance_pct) < (sgs->group_runnable * FAIR_ONEHUNDRED)) {
8855         return true;
8856     }
8857 
8858     return false;
8859 }
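
/*
 * Example with illustrative numbers, imbalance_pct = 117 and a 4-CPU group
 * of group_capacity = 4096 (group_runnable tracking group_util): with 3
 * runnable tasks the group always has spare capacity (sum_nr_running <
 * group_weight); with 4 tasks and group_util = 3600 it is neither
 * (group_fully_busy), since 4096 * 100 < 3600 * 117 defeats the spare
 * check while sum_nr_running <= group_weight blocks the overload check;
 * with 5 tasks and group_util = 3900 it is overloaded, since
 * 4096 * 100 < 3900 * 117.
 */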
8860 
8861 /*
8862  * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller
8863  * per-CPU capacity than sched_group ref.
8864  */
8865 static inline bool group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
8866 {
8867     return fits_capacity(sg->sgc->min_capacity, ref->sgc->min_capacity);
8868 }
8869 
8870 /*
8871  * group_smaller_max_cpu_capacity: Returns true if sched_group sg has smaller
8872  * per-CPU capacity_orig than sched_group ref.
8873  */
8874 static inline bool group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
8875 {
8876     return fits_capacity(sg->sgc->max_capacity, ref->sgc->max_capacity);
8877 }
8878 
8879 static inline enum group_type group_classify(unsigned int imbalance_pct, struct sched_group *group,
8880                                              struct sg_lb_stats *sgs)
8881 {
8882     if (group_is_overloaded(imbalance_pct, sgs)) {
8883         return group_overloaded;
8884     }
8885 
8886     if (sg_imbalanced(group)) {
8887         return group_imbalanced;
8888     }
8889 
8890     if (sgs->group_asym_packing) {
8891         return group_asym_packing;
8892     }
8893 
8894     if (sgs->group_misfit_task_load) {
8895         return group_misfit_task;
8896     }
8897 
8898     if (!group_has_capacity(imbalance_pct, sgs)) {
8899         return group_fully_busy;
8900     }
8901 
8902     return group_has_spare;
8903 }
8904 
8905 static bool update_nohz_stats(struct rq *rq, bool force)
8906 {
8907 #ifdef CONFIG_NO_HZ_COMMON
8908     unsigned int cpu = rq->cpu;
8909 
8910     if (!rq->has_blocked_load) {
8911         return false;
8912     }
8913 
8914     if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask)) {
8915         return false;
8916     }
8917 
8918     if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick)) {
8919         return true;
8920     }
8921 
8922     update_blocked_averages(cpu);
8923 
8924     return rq->has_blocked_load;
8925 #else
8926     return false;
8927 #endif
8928 }
8929 
8930 /**
8931  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
8932  * @env: The load balancing environment.
8933  * @group: sched_group whose statistics are to be updated.
8934  * @sgs: variable to hold the statistics for this group.
8935  * @sg_status: Holds flag indicating the status of the sched_group
8936  */
8937 static inline void update_sg_lb_stats(struct lb_env *env, struct sched_group *group, struct sg_lb_stats *sgs,
8938                                       int *sg_status)
8939 {
8940     int i, nr_running, local_group;
8941 
8942     memset(sgs, 0, sizeof(*sgs));
8943 
8944     local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group));
8945 
8946     for_each_cpu_and(i, sched_group_span(group), env->cpus)
8947     {
8948         struct rq *rq = cpu_rq(i);
8949 
8950         if (cpu_isolated(i)) {
8951             continue;
8952         }
8953 
8954         if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false)) {
8955             env->flags |= LBF_NOHZ_AGAIN;
8956         }
8957 
8958         sgs->group_load += cpu_load(rq);
8959         sgs->group_util += cpu_util(i);
8960         sgs->group_runnable += cpu_runnable(rq);
8961         sgs->sum_h_nr_running += rq->cfs.h_nr_running;
8962 
8963         nr_running = rq->nr_running;
8964         sgs->sum_nr_running += nr_running;
8965 
8966         if (nr_running > 1) {
8967             *sg_status |= SG_OVERLOAD;
8968         }
8969 
8970         if (cpu_overutilized(i)) {
8971             *sg_status |= SG_OVERUTILIZED;
8972         }
8973 
8974 #ifdef CONFIG_NUMA_BALANCING
8975         sgs->nr_numa_running += rq->nr_numa_running;
8976         sgs->nr_preferred_running += rq->nr_preferred_running;
8977 #endif
8978         /*
8979          * No need to call idle_cpu() if nr_running is not 0
8980          */
8981         if (!nr_running && idle_cpu(i)) {
8982             sgs->idle_cpus++;
8983             /* Idle cpu can't have misfit task */
8984             continue;
8985         }
8986 
8987         if (local_group) {
8988             continue;
8989         }
8990 
8991         /* Check for a misfit task on the cpu */
8992         if (env->sd->flags & SD_ASYM_CPUCAPACITY && sgs->group_misfit_task_load < rq->misfit_task_load) {
8993             sgs->group_misfit_task_load = rq->misfit_task_load;
8994             *sg_status |= SG_OVERLOAD;
8995         }
8996     }
8997 
8998     /* Isolated CPU has no weight */
8999     if (!group->group_weight) {
9000         sgs->group_capacity = 0;
9001         sgs->avg_load = 0;
9002         sgs->group_type = group_has_spare;
9003         sgs->group_weight = group->group_weight;
9004         return;
9005     }
9006 
9007     /* Check if dst CPU is idle and preferred to this group */
9008     if (env->sd->flags & SD_ASYM_PACKING && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
9009         sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu)) {
9010         sgs->group_asym_packing = 1;
9011     }
9012 
9013     sgs->group_capacity = group->sgc->capacity;
9014 
9015     sgs->group_weight = group->group_weight;
9016 
9017     sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
9018 
9019     /* Computing avg_load makes sense only when group is overloaded */
9020     if (sgs->group_type == group_overloaded) {
9021         sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / sgs->group_capacity;
9022     }
9023 }
9024 
9025 /**
9026  * update_sd_pick_busiest - return 1 on busiest group
9027  * @env: The load balancing environment.
9028  * @sds: sched_domain statistics
9029  * @sg: sched_group candidate to be checked for being the busiest
9030  * @sgs: sched_group statistics
9031  *
9032  * Determine if @sg is a busier group than the previously selected
9033  * busiest group.
9034  *
9035  * Return: %true if @sg is a busier group than the previously selected
9036  * busiest group. %false otherwise.
9037  */
9038 static bool update_sd_pick_busiest(struct lb_env *env, struct sd_lb_stats *sds, struct sched_group *sg,
9039                                    struct sg_lb_stats *sgs)
9040 {
9041     struct sg_lb_stats *busiest = &sds->busiest_stat;
9042 
9043     /* Make sure that there is at least one task to pull */
9044     if (!sgs->sum_h_nr_running) {
9045         return false;
9046     }
9047 
9048     /*
9049      * Don't try to pull misfit tasks we can't help.
9050      * We can use max_capacity here as reduction in capacity on some
9051      * CPUs in the group should either be possible to resolve
9052      * internally or be covered by avg_load imbalance (eventually).
9053      */
9054     if (sgs->group_type == group_misfit_task &&
9055         (!group_smaller_max_cpu_capacity(sg, sds->local) || sds->local_stat.group_type != group_has_spare)) {
9056         return false;
9057     }
9058 
9059     if (sgs->group_type > busiest->group_type) {
9060         return true;
9061     }
9062 
9063     if (sgs->group_type < busiest->group_type) {
9064         return false;
9065     }
9066 
9067     /*
9068      * The candidate and the current busiest group are the same type of
9069      * group. Let's check which one is the busiest according to the type.
9070      */
9071 
9072     switch (sgs->group_type) {
9073         case group_overloaded:
9074             /* Select the overloaded group with highest avg_load. */
9075             if (sgs->avg_load <= busiest->avg_load) {
9076                 return false;
9077             }
9078             break;
9079 
9080         case group_imbalanced:
9081             /*
9082              * Select the 1st imbalanced group as we don't have any way to
9083              * choose one more than another.
9084              */
9085             return false;
9086 
9087         case group_asym_packing:
9088             /* Prefer to move work away from the lowest-priority CPU */
9089             if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu)) {
9090                 return false;
9091             }
9092             break;
9093 
9094         case group_misfit_task:
9095             /*
9096              * If we have more than one misfit sg go with the biggest
9097              * misfit.
9098              */
9099             if (sgs->group_misfit_task_load < busiest->group_misfit_task_load) {
9100                 return false;
9101             }
9102             break;
9103 
9104         case group_fully_busy:
9105             /*
9106              * Select the fully busy group with highest avg_load. In
9107              * theory, there is no need to pull task from such kind of
9108              * group because tasks have all compute capacity that they need
9109              * but we can still improve the overall throughput by reducing
9110              * contention when accessing shared HW resources.
9111              *
9112              * XXX for now avg_load is not computed and always 0 so we
9113              * select the 1st one.
9114              */
9115             if (sgs->avg_load <= busiest->avg_load) {
9116                 return false;
9117             }
9118             break;
9119 
9120         case group_has_spare:
9121             /*
9122              * Select the not-overloaded group with the lowest number of
9123              * idle CPUs and the highest number of running tasks. We could
9124              * also compare the spare capacity, which is more stable, but a
9125              * group can end up with less spare capacity yet more idle CPUs,
9126              * which means less opportunity to pull tasks.
9127              */
9128             if (sgs->idle_cpus > busiest->idle_cpus) {
9129                 return false;
9130             } else if ((sgs->idle_cpus == busiest->idle_cpus) && (sgs->sum_nr_running <= busiest->sum_nr_running)) {
9131                 return false;
9132             }
9133 
9134             break;
9135     }
9136 
9137     /*
9138      * Candidate sg has no more than one task per CPU and has higher
9139      * per-CPU capacity. Migrating tasks to less capable CPUs may harm
9140      * throughput. Maximize throughput; power/energy consequences are not
9141      * considered.
9142      */
9143     if ((env->sd->flags & SD_ASYM_CPUCAPACITY) && (sgs->group_type <= group_fully_busy) &&
9144         (group_smaller_min_cpu_capacity(sds->local, sg))) {
9145         return false;
9146     }
9147 
9148     return true;
9149 }
9150 
9151 #ifdef CONFIG_NUMA_BALANCING
9152 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
9153 {
9154     if (sgs->sum_h_nr_running > sgs->nr_numa_running) {
9155         return regular;
9156     }
9157     if (sgs->sum_h_nr_running > sgs->nr_preferred_running) {
9158         return remote;
9159     }
9160     return all;
9161 }
9162 
9163 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
9164 {
9165     if (rq->nr_running > rq->nr_numa_running) {
9166         return regular;
9167     }
9168     if (rq->nr_running > rq->nr_preferred_running) {
9169         return remote;
9170     }
9171     return all;
9172 }
9173 #else
9174 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
9175 {
9176     return all;
9177 }
9178 
9179 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
9180 {
9181     return regular;
9182 }
9183 #endif /* CONFIG_NUMA_BALANCING */
9184 
9185 struct sg_lb_stats;
9186 
9187 /*
9188  * task_running_on_cpu - return 1 if @p is running on @cpu.
9189  */
9190 
9191 static unsigned int task_running_on_cpu(int cpu, struct task_struct *p)
9192 {
9193     /* Task has no contribution or is new */
9194     if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) {
9195         return 0;
9196     }
9197 
9198     if (task_on_rq_queued(p)) {
9199         return 1;
9200     }
9201 
9202     return 0;
9203 }
9204 
9205 /**
9206  * idle_cpu_without - would a given CPU be idle without p ?
9207  * @cpu: the processor on which idleness is tested.
9208  * @p: task which should be ignored.
9209  *
9210  * Return: 1 if the CPU would be idle. 0 otherwise.
9211  */
9212 static int idle_cpu_without(int cpu, struct task_struct *p)
9213 {
9214     struct rq *rq = cpu_rq(cpu);
9215 
9216     if (rq->curr != rq->idle && rq->curr != p) {
9217         return 0;
9218     }
9219 
9220     /*
9221      * rq->nr_running can't be used here: an updated version without the
9222      * impact of p on cpu must be used instead. The updated nr_running
9223      * must be computed and tested before calling idle_cpu_without().
9224      */
9225 
9226 #ifdef CONFIG_SMP
9227     if (rq->ttwu_pending) {
9228         return 0;
9229     }
9230 #endif
9231 
9232     return 1;
9233 }
9234 
9235 /*
9236  * update_sg_wakeup_stats - Update sched_group's statistics for wakeup.
9237  * @sd: The sched_domain level to look for idlest group.
9238  * @group: sched_group whose statistics are to be updated.
9239  * @sgs: variable to hold the statistics for this group.
9240  * @p: The task for which we look for the idlest group/CPU.
9241  */
9242 static inline void update_sg_wakeup_stats(struct sched_domain *sd, struct sched_group *group, struct sg_lb_stats *sgs,
9243                                           struct task_struct *p)
9244 {
9245     int i, nr_running;
9246 
9247     memset(sgs, 0, sizeof(*sgs));
9248 
9249     for_each_cpu(i, sched_group_span(group))
9250     {
9251         struct rq *rq = cpu_rq(i);
9252         unsigned int local;
9253 
9254         sgs->group_load += cpu_load_without(rq, p);
9255         sgs->group_util += cpu_util_without(i, p);
9256         sgs->group_runnable += cpu_runnable_without(rq, p);
9257         local = task_running_on_cpu(i, p);
9258         sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
9259 
9260         nr_running = rq->nr_running - local;
9261         sgs->sum_nr_running += nr_running;
9262 
9263         /*
9264          * No need to call idle_cpu_without() if nr_running is not 0
9265          */
9266         if (!nr_running && idle_cpu_without(i, p)) {
9267             sgs->idle_cpus++;
9268         }
9269     }
9270 
9271     /* Check if task fits in the group */
9272     if (sd->flags & SD_ASYM_CPUCAPACITY && !task_fits_capacity(p, group->sgc->max_capacity)) {
9273         sgs->group_misfit_task_load = 1;
9274     }
9275 
9276     sgs->group_capacity = group->sgc->capacity;
9277 
9278     sgs->group_weight = group->group_weight;
9279 
9280     sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
9281 
9282     /*
9283      * Computing avg_load makes sense only when group is fully busy or
9284      * overloaded
9285      */
9286     if (sgs->group_type == group_fully_busy || sgs->group_type == group_overloaded) {
9287         sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / sgs->group_capacity;
9288     }
9289 }
9290 
9291 static bool update_pick_idlest(struct sched_group *idlest, struct sg_lb_stats *idlest_sgs, struct sched_group *group,
9292                                struct sg_lb_stats *sgs)
9293 {
9294     if (sgs->group_type < idlest_sgs->group_type) {
9295         return true;
9296     }
9297 
9298     if (sgs->group_type > idlest_sgs->group_type) {
9299         return false;
9300     }
9301 
9302     /*
9303      * The candidate and the current idlest group are the same type of
9304      * group. Let's check which one is the idlest according to the type.
9305      */
9306 
9307     switch (sgs->group_type) {
9308         case group_overloaded:
9309         case group_fully_busy:
9310             /* Select the group with lowest avg_load. */
9311             if (idlest_sgs->avg_load <= sgs->avg_load) {
9312                 return false;
9313             }
9314             break;
9315 
9316         case group_imbalanced:
9317         case group_asym_packing:
9318             /* Those types are not used in the slow wakeup path */
9319             return false;
9320 
9321         case group_misfit_task:
9322             /* Select group with the highest max capacity */
9323             if (idlest->sgc->max_capacity >= group->sgc->max_capacity) {
9324                 return false;
9325             }
9326             break;
9327 
9328         case group_has_spare:
9329             /* Select group with most idle CPUs */
9330             if (idlest_sgs->idle_cpus > sgs->idle_cpus) {
9331                 return false;
9332             }
9333 
9334             /* Select group with lowest group_util */
9335             if (idlest_sgs->idle_cpus == sgs->idle_cpus && idlest_sgs->group_util <= sgs->group_util) {
9336                 return false;
9337             }
9338 
9339             break;
9340     }
9341 
9342     return true;
9343 }
9344 
9345 /*
9346  * find_idlest_group() finds and returns the least busy CPU group within the
9347  * domain.
9348  *
9349  * Assumes p is allowed on at least one CPU in sd.
9350  */
9351 static struct sched_group *find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
9352 {
9353     struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups;
9354     struct sg_lb_stats local_sgs, tmp_sgs;
9355     struct sg_lb_stats *sgs;
9356     unsigned long imbalance;
9357     struct sg_lb_stats idlest_sgs = {
9358         .avg_load = UINT_MAX,
9359         .group_type = group_overloaded,
9360     };
9361 #ifdef CONFIG_CPU_ISOLATION_OPT
9362     cpumask_t allowed_cpus;
9363 
9364     cpumask_andnot(&allowed_cpus, p->cpus_ptr, cpu_isolated_mask);
9365 #endif
9366 
9367     imbalance = scale_load_down(NICE_0_LOAD) * (sd->imbalance_pct - FAIR_ONEHUNDRED) / FAIR_ONEHUNDRED;
9368 
9369     do {
9370         int local_group;
9371 
9372         /* Skip over this group if it has no CPUs allowed */
9373 #ifdef CONFIG_CPU_ISOLATION_OPT
9374         if (!cpumask_intersects(sched_group_span(group), &allowed_cpus))
9375 #else
9376         if (!cpumask_intersects(sched_group_span(group), p->cpus_ptr))
9377 #endif
9378             continue;
9379 
9380         local_group = cpumask_test_cpu(this_cpu, sched_group_span(group));
9381         if (local_group) {
9382             sgs = &local_sgs;
9383             local = group;
9384         } else {
9385             sgs = &tmp_sgs;
9386         }
9387 
9388         update_sg_wakeup_stats(sd, group, sgs, p);
9389 
9390         if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
9391             idlest = group;
9392             idlest_sgs = *sgs;
9393         }
9394     } while (group = group->next, group != sd->groups);
9395 
9396     /* There is no idlest group to push tasks to */
9397     if (!idlest) {
9398         return NULL;
9399     }
9400 
9401     /* The local group has been skipped because of CPU affinity */
9402     if (!local) {
9403         return idlest;
9404     }
9405 
9406     /*
9407      * If the local group is idler than the selected idlest group
9408      * don't try and push the task.
9409      */
9410     if (local_sgs.group_type < idlest_sgs.group_type) {
9411         return NULL;
9412     }
9413 
9414     /*
9415      * If the local group is busier than the selected idlest group
9416      * try and push the task.
9417      */
9418     if (local_sgs.group_type > idlest_sgs.group_type) {
9419         return idlest;
9420     }
9421 
9422     switch (local_sgs.group_type) {
9423         case group_overloaded:
9424         case group_fully_busy:
9425             /*
9426              * When comparing groups across NUMA domains, it's possible for
9427              * the local domain to be very lightly loaded relative to the
9428              * remote domains but "imbalance" skews the comparison making
9429              * remote CPUs look much more favourable. When considering
9430              * cross-domain, add imbalance to the load on the remote node
9431              * and consider staying local.
9432              */
9433 
9434             if ((sd->flags & SD_NUMA) && ((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load)) {
9435                 return NULL;
9436             }
9437 
9438             /*
9439              * If the local group is less loaded than the selected
9440              * idlest group don't try and push any tasks.
9441              */
9442             if (idlest_sgs.avg_load >= (local_sgs.avg_load + imbalance)) {
9443                 return NULL;
9444             }
9445 
9446             if (FAIR_ONEHUNDRED * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load) {
9447                 return NULL;
9448             }
9449             break;
9450 
9451         case group_imbalanced:
9452         case group_asym_packing:
9453         /* Those types are not used in the slow wakeup path */
9454             return NULL;
9455 
9456         case group_misfit_task:
9457             /* Select group with the highest max capacity */
9458             if (local->sgc->max_capacity >= idlest->sgc->max_capacity) {
9459                 return NULL;
9460             }
9461             break;
9462 
9463         case group_has_spare:
9464             if (sd->flags & SD_NUMA) {
9465 #ifdef CONFIG_NUMA_BALANCING
9466                 int idlest_cpu;
9467                 /*
9468                  * If there is spare capacity at NUMA, try to select
9469                  * the preferred node
9470                  */
9471                 if (cpu_to_node(this_cpu) == p->numa_preferred_nid) {
9472                     return NULL;
9473                 }
9474 
9475                 idlest_cpu = cpumask_first(sched_group_span(idlest));
9476                 if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid) {
9477                     return idlest;
9478                 }
9479 #endif
9480                 /*
9481                  * Otherwise, keep the task on this node to stay close to
9482                  * its wakeup source and improve locality. If there is
9483                  * a real need of migration, periodic load balance will
9484                  * take care of it.
9485                  */
9486                 if (local_sgs.idle_cpus) {
9487                     return NULL;
9488                 }
9489             }
9490 
9491             /*
9492              * Select the group with the highest number of idle CPUs. We
9493              * could also compare the utilization, which is more stable, but
9494              * it can turn out that a group has less spare capacity yet more
9495              * idle CPUs, which means more opportunity to run a task.
9496              */
9497             if (local_sgs.idle_cpus >= idlest_sgs.idle_cpus) {
9498                 return NULL;
9499             }
9500             break;
9501     }
9502 
9503     return idlest;
9504 }
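/*
 * Illustrative margin for the fully_busy/overloaded comparisons above,
 * assuming imbalance_pct = 125: imbalance = 1024 * (125 - 100) / 100 =
 * 256, so a remote group must be at least 256 avg_load units idler than
 * the local group before the task is pushed away from it.
 */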
9505 
9506 /**
9507  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
9508  * @env: The load balancing environment.
9509  * @sds: variable to hold the statistics for this sched_domain.
9510  */
9511 
9512 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
9513 {
9514     struct sched_domain *child = env->sd->child;
9515     struct sched_group *sg = env->sd->groups;
9516     struct sg_lb_stats *local = &sds->local_stat;
9517     struct sg_lb_stats tmp_sgs;
9518     int sg_status = 0;
9519 
9520 #ifdef CONFIG_NO_HZ_COMMON
9521     if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked)) {
9522         env->flags |= LBF_NOHZ_STATS;
9523     }
9524 #endif
9525 
9526     do {
9527         struct sg_lb_stats *sgs = &tmp_sgs;
9528         int local_group;
9529 
9530         local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
9531         if (local_group) {
9532             sds->local = sg;
9533             sgs = local;
9534 
9535             if (env->idle != CPU_NEWLY_IDLE || time_after_eq(jiffies, sg->sgc->next_update)) {
9536                 update_group_capacity(env->sd, env->dst_cpu);
9537             }
9538         }
9539 
9540         update_sg_lb_stats(env, sg, sgs, &sg_status);
9541 
9542         if (local_group) {
9543             goto next_group;
9544         }
9545 
9546         if (update_sd_pick_busiest(env, sds, sg, sgs)) {
9547             sds->busiest = sg;
9548             sds->busiest_stat = *sgs;
9549         }
9550 
9551     next_group:
9552         /* Now, start updating sd_lb_stats */
9553         sds->total_load += sgs->group_load;
9554         sds->total_capacity += sgs->group_capacity;
9555 
9556         sg = sg->next;
9557     } while (sg != env->sd->groups);
9558 
9559     /* Tag domain that child domain prefers tasks go to siblings first */
9560     sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
9561 
9562 #ifdef CONFIG_NO_HZ_COMMON
9563     if ((env->flags & LBF_NOHZ_AGAIN) && cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) {
9564         WRITE_ONCE(nohz.next_blocked, jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD));
9565     }
9566 #endif
9567 
9568     if (env->sd->flags & SD_NUMA) {
9569         env->fbq_type = fbq_classify_group(&sds->busiest_stat);
9570     }
9571 
9572     if (!env->sd->parent) {
9573         struct root_domain *rd = env->dst_rq->rd;
9574 
9575         /* update overload indicator if we are at root domain */
9576         WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
9577 
9578         /* Update over-utilization (tipping point, U >= 0) indicator */
9579         WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
9580         trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
9581     } else if (sg_status & SG_OVERUTILIZED) {
9582         struct root_domain *rd = env->dst_rq->rd;
9583 
9584         WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
9585         trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
9586     }
9587 }
9588 
9589 static inline long adjust_numa_imbalance(int imbalance, int nr_running)
9590 {
9591     unsigned int imbalance_min;
9592 
9593     /*
9594      * Allow a small imbalance based on a simple pair of communicating
9595      * tasks that remain local when the source domain is almost idle.
9596      */
9597     imbalance_min = 0x2;
9598     if (nr_running <= imbalance_min) {
9599         return 0;
9600     }
9601 
9602     return imbalance;
9603 }
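/*
 * Example: a waker/wakee pair on an otherwise idle source domain has
 * nr_running = 2 <= imbalance_min, so the reported imbalance collapses
 * to 0 and the communicating pair stays local; with nr_running = 3 the
 * computed imbalance passes through unchanged.
 */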
9604 
9605 /**
9606  * calculate_imbalance - Calculate the amount of imbalance present within the
9607  *             groups of a given sched_domain during load balance.
9608  * @env: load balance environment
9609  * @sds: statistics of the sched_domain whose imbalance is to be calculated.
9610  */
9611 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
9612 {
9613     struct sg_lb_stats *local, *busiest;
9614 
9615     local = &sds->local_stat;
9616     busiest = &sds->busiest_stat;
9617 
9618     if (busiest->group_type == group_misfit_task) {
9619         /* Set imbalance to allow misfit tasks to be balanced. */
9620         env->migration_type = migrate_misfit;
9621         env->imbalance = 1;
9622         return;
9623     }
9624 
9625     if (busiest->group_type == group_asym_packing) {
9626         /*
9627          * In case of asym capacity, we will try to migrate all load to
9628          * the preferred CPU.
9629          */
9630         env->migration_type = migrate_task;
9631         env->imbalance = busiest->sum_h_nr_running;
9632         return;
9633     }
9634 
9635     if (busiest->group_type == group_imbalanced) {
9636         /*
9637          * In the group_imb case we cannot rely on group-wide averages
9638          * to ensure CPU-load equilibrium, try to move any task to fix
9639          * the imbalance. The next load balance will take care of
9640          * balancing back the system.
9641          */
9642         env->migration_type = migrate_task;
9643         env->imbalance = 1;
9644         return;
9645     }
9646 
9647     /*
9648      * Try to use spare capacity of local group without overloading it or
9649      * emptying busiest.
9650      */
9651     if (local->group_type == group_has_spare) {
9652         if ((busiest->group_type > group_fully_busy) && !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) {
9653             /*
9654              * If busiest is overloaded, try to fill spare
9655              * capacity. This might end up creating spare capacity
9656              * in busiest or busiest still being overloaded but
9657              * there is no simple way to directly compute the
9658              * amount of load to migrate in order to balance the
9659              * system.
9660              */
9661             env->migration_type = migrate_util;
9662             env->imbalance = max(local->group_capacity, local->group_util) - local->group_util;
9663 
9664             /*
9665              * In some cases, the group's utilization is max or even
9666              * higher than capacity because of migrations but the
9667              * local CPU is (newly) idle. There is at least one
9668              * waiting task in this overloaded busiest group. Let's
9669              * try to pull it.
9670              */
9671             if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) {
9672                 env->migration_type = migrate_task;
9673                 env->imbalance = 1;
9674             }
9675 
9676             return;
9677         }
9678 
9679         if (busiest->group_weight == 1 || sds->prefer_sibling) {
9680             unsigned int nr_diff = busiest->sum_nr_running;
9681             /*
9682              * When prefer sibling, evenly spread running tasks on
9683              * groups.
9684              */
9685             env->migration_type = migrate_task;
9686             lsub_positive(&nr_diff, local->sum_nr_running);
9687             env->imbalance = nr_diff >> 1;
9688         } else {
9689             /*
9690              * If there is no overload, we just want to even the number of
9691              * idle cpus.
9692              */
9693             env->migration_type = migrate_task;
9694             env->imbalance = max_t(long, 0, (local->idle_cpus - busiest->idle_cpus) >> 1);
9695         }
9696 
9697         /* Consider allowing a small imbalance between NUMA groups */
9698         if (env->sd->flags & SD_NUMA) {
9699             env->imbalance = adjust_numa_imbalance(env->imbalance, busiest->sum_nr_running);
9700         }
9701 
9702         return;
9703     }
9704 
9705     /*
9706      * Local is fully busy but has to take more load to relieve the
9707      * busiest group
9708      */
9709     if (local->group_type < group_overloaded) {
9710         /*
9711          * Local will become overloaded so the avg_load metrics are
9712          * finally needed.
9713          */
9714 
9715         local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) / local->group_capacity;
9716 
9717         sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) / sds->total_capacity;
9718         /*
9719          * If the local group is more loaded than the selected
9720          * busiest group don't try to pull any tasks.
9721          */
9722         if (local->avg_load >= busiest->avg_load) {
9723             env->imbalance = 0;
9724             return;
9725         }
9726     }
9727 
9728     /*
9729      * Both groups are or will become overloaded and we're trying to get all
9730      * the CPUs to the average_load, so we don't want to push ourselves
9731      * above the average load, nor do we wish to reduce the max loaded CPU
9732      * below the average load. At the same time, we also don't want to
9733      * reduce the group load below the group capacity. Thus we look for
9734      * the minimum possible imbalance.
9735      */
9736     env->migration_type = migrate_load;
9737     env->imbalance = min((busiest->avg_load - sds->avg_load) * busiest->group_capacity,
9738                          (sds->avg_load - local->avg_load) * local->group_capacity) /
9739                      SCHED_CAPACITY_SCALE;
9740 }
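/*
 * Worked example for the final migrate_load computation (illustrative
 * numbers, both groups at group_capacity = 1024): with busiest->avg_load
 * = 950, sds->avg_load = 700 and local->avg_load = 500, the imbalance is
 * min((950 - 700) * 1024, (700 - 500) * 1024) / 1024 = 200: just enough
 * load to pull one side toward the domain average without pushing the
 * other side past it.
 */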
9741 
9742 /******* find_busiest_group() helpers end here *********************/
9743 
9744 /*
9745  * Decision matrix according to the local and busiest group type:
9746  *
9747  * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded
9748  * has_spare        nr_idle   balanced   N/A    N/A  balanced   balanced
9749  * fully_busy       nr_idle   nr_idle    N/A    N/A  balanced   balanced
9750  * misfit_task      force     N/A        N/A    N/A  force      force
9751  * asym_packing     force     force      N/A    N/A  force      force
9752  * imbalanced       force     force      N/A    N/A  force      force
9753  * overloaded       force     force      N/A    N/A  force      avg_load
9754  *
9755  * N/A :      Not Applicable because already filtered while updating
9756  *            statistics.
9757  * balanced : The system is balanced for these 2 groups.
9758  * force :    Calculate the imbalance as load migration is probably needed.
9759  * avg_load : Only if imbalance is significant enough.
9760  * nr_idle :  dst_cpu is not busy and the number of idle CPUs is quite
9761  *            different between the groups.
9762  */
9763 
9764 /**
9765  * find_busiest_group - Returns the busiest group within the sched_domain
9766  * if there is an imbalance.
9767  *
9768  * Also calculates the amount of runnable load which should be moved
9769  * to restore balance.
9770  *
9771  * @env: The load balancing environment.
9772  *
9773  * Return:    - The busiest group if imbalance exists.
9774  */
9775 static struct sched_group *find_busiest_group(struct lb_env *env)
9776 {
9777     struct sg_lb_stats *local, *busiest;
9778     struct sd_lb_stats sds;
9779 
9780     init_sd_lb_stats(&sds);
9781 
9782     /*
9783      * Compute the various statistics relevant for load balancing at
9784      * this level.
9785      */
9786     update_sd_lb_stats(env, &sds);
9787 
9788     if (sched_energy_enabled()) {
9789         struct root_domain *rd = env->dst_rq->rd;
9790 
9791         if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)) {
9792             goto out_balanced;
9793         }
9794     }
9795 
9796     local = &sds.local_stat;
9797     busiest = &sds.busiest_stat;
9798 
9799     /* There is no busy sibling group to pull tasks from */
9800     if (!sds.busiest) {
9801         goto out_balanced;
9802     }
9803 
9804     /* Misfit tasks should be dealt with regardless of the avg load */
9805     if (busiest->group_type == group_misfit_task) {
9806         goto force_balance;
9807     }
9808 
9809     /* ASYM feature bypasses nice load balance check */
9810     if (busiest->group_type == group_asym_packing) {
9811         goto force_balance;
9812     }
9813 
9814     /*
9815      * If the busiest group is imbalanced the below checks don't
9816      * work because they assume all things are equal, which typically
9817      * isn't true due to cpus_ptr constraints and the like.
9818      */
9819     if (busiest->group_type == group_imbalanced) {
9820         goto force_balance;
9821     }
9822 
9823     /*
9824      * If the local group is busier than the selected busiest group
9825      * don't try and pull any tasks.
9826      */
9827     if (local->group_type > busiest->group_type) {
9828         goto out_balanced;
9829     }
9830 
9831     /*
9832      * When groups are overloaded, use the avg_load to ensure fairness
9833      * between tasks.
9834      */
9835     if (local->group_type == group_overloaded) {
9836         /*
9837          * If the local group is more loaded than the selected
9838          * busiest group don't try to pull any tasks.
9839          */
9840         if (local->avg_load >= busiest->avg_load) {
9841             goto out_balanced;
9842         }
9843 
9844         /* XXX broken for overlapping NUMA groups */
9845         sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) / sds.total_capacity;
9846 
9847         /*
9848          * Don't pull any tasks if this group is already above the
9849          * domain average load.
9850          */
9851         if (local->avg_load >= sds.avg_load) {
9852             goto out_balanced;
9853         }
9854 
9855         /*
9856          * If the busiest group is more loaded, use imbalance_pct to be
9857          * conservative.
9858          */
9859         if (FAIR_ONEHUNDRED * busiest->avg_load <= env->sd->imbalance_pct * local->avg_load) {
9860             goto out_balanced;
9861         }
9862     }
9863 
9864     /* Try to move all excess tasks to child's sibling domain */
9865     if (sds.prefer_sibling && local->group_type == group_has_spare &&
9866         busiest->sum_nr_running > local->sum_nr_running + 1) {
9867         goto force_balance;
9868     }
9869 
9870     if (busiest->group_type != group_overloaded) {
9871         if (env->idle == CPU_NOT_IDLE) {
9872             /*
9873              * If the busiest group is not overloaded (and as a
9874              * result the local one too) but this CPU is already
9875              * busy, let another idle CPU try to pull task.
9876              */
9877             goto out_balanced;
9878         }
9879 
9880         if (busiest->group_weight > 1 && local->idle_cpus <= (busiest->idle_cpus + 1)) {
9881             /*
9882              * If the busiest group is not overloaded
9883              * and there is no imbalance between this and busiest
9884              * group wrt idle CPUs, it is balanced. The imbalance
9885              * becomes significant if the diff is greater than 1
9886              * otherwise we might just end up moving the imbalance
9887              * to another group. Of course this applies only if
9888              * there is more than 1 CPU per group.
9889              */
9890             goto out_balanced;
9891         }
9892 
9893         if (busiest->sum_h_nr_running == 1) {
9894             /*
9895              * busiest doesn't have any tasks waiting to run
9896              */
9897             goto out_balanced;
9898         }
9899     }
9900 
9901 force_balance:
9902     /* Looks like there is an imbalance. Compute it */
9903     calculate_imbalance(env, &sds);
9904     return env->imbalance ? sds.busiest : NULL;
9905 
9906 out_balanced:
9907     env->imbalance = 0;
9908     return NULL;
9909 }
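/*
 * Example of the imbalance_pct filter above (assuming imbalance_pct =
 * 117): with local->avg_load = 1000, balancing proceeds only when
 * 100 * busiest->avg_load > 117 * 1000, i.e. the busiest group must be
 * at least 17% more loaded than the local group.
 */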
9910 
9911 /*
9912  * find_busiest_queue - find the busiest runqueue among the CPUs in the group.
9913  */
9914 static struct rq *find_busiest_queue(struct lb_env *env, struct sched_group *group)
9915 {
9916     struct rq *busiest = NULL, *rq;
9917     unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1;
9918     unsigned int busiest_nr = 0;
9919     int i;
9920 
9921     for_each_cpu_and(i, sched_group_span(group), env->cpus)
9922     {
9923         unsigned long capacity, load, util;
9924         unsigned int nr_running;
9925         enum fbq_type rt;
9926 
9927         rq = cpu_rq(i);
9928         rt = fbq_classify_rq(rq);
9929         /*
9930          * We classify groups/runqueues into three groups:
9931          *  - regular: there are !numa tasks
9932          *  - remote:  there are numa tasks that run on the 'wrong' node
9933          *  - all:     there is no distinction
9934          *
9935          * In order to avoid migrating ideally placed numa tasks,
9936          * ignore those when there are better options.
9937          *
9938          * If we ignore the actual busiest queue to migrate another
9939          * task, the next balance pass can still reduce the busiest
9940          * queue by moving tasks around inside the node.
9941          *
9942          * If we cannot move enough load due to this classification
9943          * the next pass will adjust the group classification and
9944          * allow migration of more tasks.
9945          *
9946          * Both cases only affect the total convergence complexity.
9947          */
9948         if (rt > env->fbq_type) {
9949             continue;
9950         }
9951 
9952         if (cpu_isolated(i)) {
9953             continue;
9954         }
9955 
9956         capacity = capacity_of(i);
9957         nr_running = rq->cfs.h_nr_running;
9958 
9959         /*
9960          * For ASYM_CPUCAPACITY domains, don't pick a CPU that could
9961          * eventually lead to active_balancing high->low capacity.
9962          * Higher per-CPU capacity is considered better than balancing
9963          * average load.
9964          */
9965         if (env->sd->flags & SD_ASYM_CPUCAPACITY && capacity_of(env->dst_cpu) < capacity && nr_running == 1) {
9966             continue;
9967         }
9968 
9969         switch (env->migration_type) {
9970             case migrate_load:
9971                 /*
9972                  * When comparing with load imbalance, use cpu_load()
9973                  * which is not scaled with the CPU capacity.
9974                  */
9975                 load = cpu_load(rq);
9976                 if (nr_running == 1 && load > env->imbalance && !check_cpu_capacity(rq, env->sd)) {
9977                     break;
9978                 }
9979 
9980                 /*
9981                  * For the load comparisons with the other CPUs,
9982                  * consider the cpu_load() scaled with the CPU
9983                  * capacity, so that the load can be moved away
9984                  * from the CPU that is potentially running at a
9985                  * lower capacity.
9986                  *
9987                  * Thus we're looking for max(load_i / capacity_i);
9988                  * cross-multiplying to rid ourselves of the
9989                  * division works out to:
9990                  * load_i * capacity_j > load_j * capacity_i;
9991                  * where j is our previous maximum.
9992                  */
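                /*
                 * Numeric instance (illustrative): load 600 on a
                 * capacity-512 CPU vs a previous maximum of load 800 on
                 * capacity 1024 gives 600 * 1024 > 800 * 512, so the
                 * smaller CPU is relatively busier and becomes the new
                 * candidate.
                 */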
9993                 if (load * busiest_capacity > busiest_load * capacity) {
9994                     busiest_load = load;
9995                     busiest_capacity = capacity;
9996                     busiest = rq;
9997                 }
9998                 break;
9999 
10000             case migrate_util:
10001                 util = cpu_util(cpu_of(rq));
10002 
10003                 /*
10004                  * Don't try to pull utilization from a CPU with one
10005                  * running task. Whatever its utilization, we will fail
10006                  * to detach the task.
10007                  */
10008                 if (nr_running <= 1) {
10009                     continue;
10010                 }
10011 
10012                 if (busiest_util < util) {
10013                     busiest_util = util;
10014                     busiest = rq;
10015                 }
10016                 break;
10017 
10018             case migrate_task:
10019                 if (busiest_nr < nr_running) {
10020                     busiest_nr = nr_running;
10021                     busiest = rq;
10022                 }
10023                 break;
10024 
10025             case migrate_misfit:
10026                 /*
10027                  * For ASYM_CPUCAPACITY domains with misfit tasks we
10028                  * simply seek the "biggest" misfit task.
10029                  */
10030                 if (rq->misfit_task_load > busiest_load) {
10031                     busiest_load = rq->misfit_task_load;
10032                     busiest = rq;
10033                 }
10034 
10035                 break;
10036         }
10037     }
10038 
10039     return busiest;
10040 }
10041 
10042 /*
10043  * Max backoff if we encounter pinned tasks. Pretty arbitrary value; any
10044  * value works so long as it is large enough.
10045  */
10046 #define MAX_PINNED_INTERVAL 512
10047 
10048 static inline bool asym_active_balance(struct lb_env *env)
10049 {
10050     /*
10051      * ASYM_PACKING needs to force migrate tasks from busy but
10052      * lower priority CPUs in order to pack all tasks in the
10053      * highest priority CPUs.
10054      */
10055     return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
10056            sched_asym_prefer(env->dst_cpu, env->src_cpu);
10057 }
10058 
10059 static inline bool voluntary_active_balance(struct lb_env *env)
10060 {
10061     struct sched_domain *sd = env->sd;
10062 
10063     if (asym_active_balance(env)) {
10064         return 1;
10065     }
10066 
10067     /*
10068      * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
10069      * It's worth migrating the task if the src_cpu's capacity is reduced
10070      * by another sched_class or by IRQs, provided more capacity stays
10071      * available on dst_cpu.
10072      */
10073     if ((env->idle != CPU_NOT_IDLE) && (env->src_rq->cfs.h_nr_running == 1)) {
10074         if ((check_cpu_capacity(env->src_rq, sd)) &&
10075             (capacity_of(env->src_cpu) * sd->imbalance_pct < capacity_of(env->dst_cpu) * FAIR_ONEHUNDRED)) {
10076             return 1;
10077         }
10078     }
10079 
10080     if (env->migration_type == migrate_misfit) {
10081         return 1;
10082     }
10083 
10084     return 0;
10085 }
10086 
10087 static int need_active_balance(struct lb_env *env)
10088 {
10089     struct sched_domain *sd = env->sd;
10090 
10091     if (voluntary_active_balance(env)) {
10092         return 1;
10093     }
10094 
10095     return unlikely(sd->nr_balance_failed > sd->cache_nice_tries + 2);
10096 }
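/*
 * Example: with cache_nice_tries = 1, the unlikely() fallback fires once
 * nr_balance_failed exceeds 3, i.e. active balancing is only forced
 * after several consecutive failed periodic balance attempts.
 */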
10097 
10098 #ifdef CONFIG_CPU_ISOLATION_OPT
10099 int group_balance_cpu_not_isolated(struct sched_group *sg)
10100 {
10101     cpumask_t cpus;
10102 
10103     cpumask_and(&cpus, sched_group_span(sg), group_balance_mask(sg));
10104     cpumask_andnot(&cpus, &cpus, cpu_isolated_mask);
10105     return cpumask_first(&cpus);
10106 }
10107 #endif
10108 
10109 static int active_load_balance_cpu_stop(void *data);
10110 
10111 static int should_we_balance(struct lb_env *env)
10112 {
10113     struct sched_group *sg = env->sd->groups;
10114     int cpu;
10115 
10116     /*
10117      * Ensure the balancing environment is consistent; inconsistency can
10118      * happen when the softirq triggers 'during' hotplug.
10119      */
10120     if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) {
10121         return 0;
10122     }
10123 
10124     /*
10125      * In the newly idle case, we will allow all the CPUs
10126      * to do the newly idle load balance.
10127      */
10128     if (env->idle == CPU_NEWLY_IDLE) {
10129         return 1;
10130     }
10131 
10132     /* Try to find first idle CPU */
10133     for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus)
10134     {
10135         if (!idle_cpu(cpu) || cpu_isolated(cpu)) {
10136             continue;
10137         }
10138 
10139         /* Are we the first idle CPU? */
10140         return cpu == env->dst_cpu;
10141     }
10142 
10143     /* Are we the first CPU of this group ? */
10144     return group_balance_cpu_not_isolated(sg) == env->dst_cpu;
10145 }
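/*
 * Example: for a balance mask spanning CPUs 0-3 with CPUs 2 and 3 idle,
 * only CPU 2 (the first idle, non-isolated CPU) returns 1 here; the
 * other CPUs back off so a single CPU balances the group per interval.
 */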
10146 
10147 /*
10148  * Check this_cpu to ensure it is balanced within domain. Attempt to move
10149  * tasks if there is an imbalance.
10150  */
10151 static int load_balance(int this_cpu, struct rq *this_rq, struct sched_domain *sd, enum cpu_idle_type idle,
10152                         int *continue_balancing)
10153 {
10154     int ld_moved, cur_ld_moved, active_balance = 0;
10155     struct sched_domain *sd_parent = sd->parent;
10156     struct sched_group *group;
10157     struct rq *busiest;
10158     struct rq_flags rf;
10159     struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
10160 
10161     struct lb_env env = {
10162         .sd = sd,
10163         .dst_cpu = this_cpu,
10164         .dst_rq = this_rq,
10165         .dst_grpmask = sched_group_span(sd->groups),
10166         .idle = idle,
10167         .loop_break = sched_nr_migrate_break,
10168         .cpus = cpus,
10169         .fbq_type = all,
10170         .tasks = LIST_HEAD_INIT(env.tasks),
10171     };
10172 
10173     cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
10174 
10175     schedstat_inc(sd->lb_count[idle]);
10176 
10177 redo:
10178     if (!should_we_balance(&env)) {
10179         *continue_balancing = 0;
10180         goto out_balanced;
10181     }
10182 
10183     group = find_busiest_group(&env);
10184     if (!group) {
10185         schedstat_inc(sd->lb_nobusyg[idle]);
10186         goto out_balanced;
10187     }
10188 
10189     busiest = find_busiest_queue(&env, group);
10190     if (!busiest) {
10191         schedstat_inc(sd->lb_nobusyq[idle]);
10192         goto out_balanced;
10193     }
10194 
10195     BUG_ON(busiest == env.dst_rq);
10196 
10197     schedstat_add(sd->lb_imbalance[idle], env.imbalance);
10198 
10199     env.src_cpu = busiest->cpu;
10200     env.src_rq = busiest;
10201 
10202     ld_moved = 0;
10203     if (busiest->nr_running > 1) {
10204         /*
10205          * Attempt to move tasks. If find_busiest_group has found
10206          * an imbalance but busiest->nr_running <= 1, the group is
10207          * still unbalanced. ld_moved simply stays zero, so it is
10208          * correctly treated as an imbalance.
10209          */
10210         env.flags |= LBF_ALL_PINNED;
10211         env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
10212 
10213     more_balance:
10214         rq_lock_irqsave(busiest, &rf);
10215         update_rq_clock(busiest);
10216 
10217         /*
10218          * cur_ld_moved - load moved in current iteration
10219          * ld_moved     - cumulative load moved across iterations
10220          */
10221         cur_ld_moved = detach_tasks(&env);
10222 
10223         /*
10224          * We've detached some tasks from busiest_rq. Every
10225          * task is masked "TASK_ON_RQ_MIGRATING", so we can safely
10226          * unlock busiest->lock, and be sure that nobody can
10227          * manipulate the tasks in parallel.
10228          * See task_rq_lock() family for the details.
10229          */
10230 
10231         rq_unlock(busiest, &rf);
10232 
10233         if (cur_ld_moved) {
10234             attach_tasks(&env);
10235             ld_moved += cur_ld_moved;
10236         }
10237 
10238         local_irq_restore(rf.flags);
10239 
10240         if (env.flags & LBF_NEED_BREAK) {
10241             env.flags &= ~LBF_NEED_BREAK;
10242             goto more_balance;
10243         }
10244 
10245         /*
10246          * Revisit (affine) tasks on src_cpu that couldn't be moved to
10247          * us and move them to an alternate dst_cpu in our sched_group
10248          * where they can run. The upper limit on how many times we
10249          * iterate on the same src_cpu depends on the number of CPUs in our
10250          * sched_group.
10251          *
10252          * This changes load balance semantics a bit on who can move
10253          * load to a given_cpu. In addition to the given_cpu itself
10254          * (or a ilb_cpu acting on its behalf where given_cpu is
10255          * nohz-idle), we now have balance_cpu in a position to move
10256          * load to given_cpu. In rare situations, this may cause
10257          * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
10258          * _independently_ and at _same_ time to move some load to
10259          * given_cpu) causing excess load to be moved to given_cpu.
10260          * This however should not happen so much in practice and
10261          * moreover subsequent load balance cycles should correct the
10262          * excess load moved.
10263          */
10264         if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
10265             /* Prevent re-selecting dst_cpu via env's CPUs */
10266             __cpumask_clear_cpu(env.dst_cpu, env.cpus);
10267 
10268             env.dst_rq = cpu_rq(env.new_dst_cpu);
10269             env.dst_cpu = env.new_dst_cpu;
10270             env.flags &= ~LBF_DST_PINNED;
10271             env.loop = 0;
10272             env.loop_break = sched_nr_migrate_break;
10273 
10274             /*
10275              * Go back to "more_balance" rather than "redo" since we
10276              * need to continue with same src_cpu.
10277              */
10278             goto more_balance;
10279         }
10280 
10281         /*
10282          * We failed to reach balance because of affinity.
10283          */
10284         if (sd_parent) {
10285             int *group_imbalance = &sd_parent->groups->sgc->imbalance;
10286 
10287             if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
10288                 *group_imbalance = 1;
10289             }
10290         }
10291 
10292         /* All tasks on this runqueue were pinned by CPU affinity */
10293         if (unlikely(env.flags & LBF_ALL_PINNED)) {
10294             __cpumask_clear_cpu(cpu_of(busiest), cpus);
10295             /*
10296              * Attempting to continue load balancing at the current
10297              * sched_domain level only makes sense if there are
10298              * active CPUs remaining as possible busiest CPUs to
10299              * pull load from which are not contained within the
10300              * destination group that is receiving any migrated
10301              * load.
10302              */
10303             if (!cpumask_subset(cpus, env.dst_grpmask)) {
10304                 env.loop = 0;
10305                 env.loop_break = sched_nr_migrate_break;
10306                 goto redo;
10307             }
10308             goto out_all_pinned;
10309         }
10310     }
10311 
10312     if (!ld_moved) {
10313         schedstat_inc(sd->lb_failed[idle]);
10314         /*
10315          * Increment the failure counter only on periodic balance.
10316          * We do not want newidle balance, which can be very
10317          * frequent, to pollute the failure counter, causing
10318          * excessive cache_hot migrations and active balances.
10319          */
10320         if (idle != CPU_NEWLY_IDLE) {
10321             sd->nr_balance_failed++;
10322         }
10323 
10324         if (need_active_balance(&env)) {
10325             unsigned long flags;
10326 
10327             raw_spin_lock_irqsave(&busiest->lock, flags);
10328 
10329             /*
10330              * Don't kick the active_load_balance_cpu_stop,
10331              * if the curr task on busiest CPU can't be
10332              * moved to this_cpu:
10333              */
10334             if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
10335                 raw_spin_unlock_irqrestore(&busiest->lock, flags);
10336                 env.flags |= LBF_ALL_PINNED;
10337                 goto out_one_pinned;
10338             }
10339 
10340             /*
10341              * ->active_balance synchronizes accesses to
10342              * ->active_balance_work.  Once set, it's cleared
10343              * only after active load balance is finished.
10344              */
10345             if (!busiest->active_balance && !cpu_isolated(cpu_of(busiest))) {
10346                 busiest->active_balance = 1;
10347                 busiest->push_cpu = this_cpu;
10348                 active_balance = 1;
10349             }
10350             raw_spin_unlock_irqrestore(&busiest->lock, flags);
10351 
10352             if (active_balance) {
10353                 stop_one_cpu_nowait(cpu_of(busiest), active_load_balance_cpu_stop, busiest,
10354                                     &busiest->active_balance_work);
10355             }
10356 
10357             /* We've kicked active balancing, force task migration. */
10358             sd->nr_balance_failed = sd->cache_nice_tries + 1;
10359         }
10360     } else {
10361         sd->nr_balance_failed = 0;
10362     }
10363 
10364     if (likely(!active_balance) || voluntary_active_balance(&env)) {
10365         /* We were unbalanced, so reset the balancing interval */
10366         sd->balance_interval = sd->min_interval;
10367     } else {
10368         /*
10369          * If we've begun active balancing, start to back off. This
10370          * case may not be covered by the all_pinned logic if there
10371          * is only 1 task on the busy runqueue (because we don't call
10372          * detach_tasks).
10373          */
10374         if (sd->balance_interval < sd->max_interval) {
10375             sd->balance_interval *= 0x2;
10376         }
10377     }
10378 
10379     goto out;
10380 
10381 out_balanced:
10382     /*
10383      * We reach balance although we may have faced some affinity
10384      * constraints. Clear the imbalance flag only if other tasks got
10385      * a chance to move and fix the imbalance.
10386      */
10387     if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
10388         int *group_imbalance = &sd_parent->groups->sgc->imbalance;
10389 
10390         if (*group_imbalance) {
10391             *group_imbalance = 0;
10392         }
10393     }
10394 
10395 out_all_pinned:
10396     /*
10397      * We reach balance because all tasks are pinned at this level so
10398      * we can't migrate them. Leave the imbalance flag set so the parent level
10399      * can try to migrate them.
10400      */
10401     schedstat_inc(sd->lb_balanced[idle]);
10402 
10403     sd->nr_balance_failed = 0;
10404 
10405 out_one_pinned:
10406     ld_moved = 0;
10407 
10408     /*
10409      * newidle_balance() disregards balance intervals, so we could
10410      * repeatedly reach this code, which would lead to balance_interval
10411      * skyrocketing in a short amount of time. Skip the balance_interval
10412      * increase logic to avoid that.
10413      */
10414     if (env.idle == CPU_NEWLY_IDLE) {
10415         goto out;
10416     }
10417 
10418     /* tune up the balancing interval */
10419     if ((env.flags & LBF_ALL_PINNED && sd->balance_interval < MAX_PINNED_INTERVAL) ||
10420         sd->balance_interval < sd->max_interval) {
10421         sd->balance_interval *= 0x2;
10422     }
10423 out:
10424     return ld_moved;
10425 }
10426 
10427 static inline unsigned long get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
10428 {
10429     unsigned long interval = sd->balance_interval;
10430 
10431     if (cpu_busy) {
10432         interval *= sd->busy_factor;
10433     }
10434 
10435     /* scale ms to jiffies */
10436     interval = msecs_to_jiffies(interval);
10437 
10438     /*
10439      * Reduce likelihood of busy balancing at higher domains racing with
10440      * balancing at lower domains by preventing their balancing periods
10441      * from being multiples of each other.
10442      */
10443     if (cpu_busy) {
10444         interval -= 1;
10445     }
10446 
10447     interval = clamp(interval, 1UL, max_load_balance_interval);
10448 
10449     return interval;
10450 }
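/*
 * Worked example (assumed defaults): balance_interval = 8ms with
 * busy_factor = 16 gives 128ms; at HZ=250 that is msecs_to_jiffies(128)
 * = 32 jiffies, reduced to 31 so busy periods at different domain
 * levels avoid staying exact multiples of each other, then clamped to
 * [1, max_load_balance_interval].
 */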
10451 
10452 static inline void update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
10453 {
10454     unsigned long interval, next;
10455 
10456     /* used by idle balance, so cpu_busy = 0 */
10457     interval = get_sd_balance_interval(sd, 0);
10458     next = sd->last_balance + interval;
10459 
10460     if (time_after(*next_balance, next)) {
10461         *next_balance = next;
10462     }
10463 }
10464 
10465 /*
10466  * active_load_balance_cpu_stop is run by the CPU stopper. It pushes
10467  * running tasks off the busiest CPU onto idle CPUs. It requires at
10468  * least 1 task to be running on each physical CPU where possible, and
10469  * avoids physical / logical imbalances.
10470  */
10471 static int active_load_balance_cpu_stop(void *data)
10472 {
10473     struct rq *busiest_rq = data;
10474     int busiest_cpu = cpu_of(busiest_rq);
10475     int target_cpu = busiest_rq->push_cpu;
10476     struct rq *target_rq = cpu_rq(target_cpu);
10477     struct sched_domain *sd = NULL;
10478     struct task_struct *p = NULL;
10479     struct rq_flags rf;
10480 #ifdef CONFIG_SCHED_EAS
10481     struct task_struct *push_task;
10482     int push_task_detached = 0;
10483 #endif
10484 
10485     rq_lock_irq(busiest_rq, &rf);
10486     /*
10487      * Between queueing the stop-work and running it is a hole in which
10488      * CPUs can become inactive. We should not move tasks from or to
10489      * inactive CPUs.
10490      */
10491     if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu)) {
10492         goto out_unlock;
10493     }
10494 
10495     /* Make sure the requested CPU hasn't gone down in the meantime: */
10496     if (unlikely(busiest_cpu != smp_processor_id() || !busiest_rq->active_balance)) {
10497         goto out_unlock;
10498     }
10499 
10500     /* Is there any task to move? */
10501     if (busiest_rq->nr_running <= 1) {
10502         goto out_unlock;
10503     }
10504 
10505     /*
10506      * This condition is "impossible"; if it occurs,
10507      * we need to fix it. Originally reported by
10508      * Bjorn Helgaas on a 128-CPU setup.
10509      */
10510     BUG_ON(busiest_rq == target_rq);
10511 
10512 #ifdef CONFIG_SCHED_EAS
10513     push_task = busiest_rq->push_task;
10514     target_cpu = busiest_rq->push_cpu;
10515     if (push_task) {
10516         struct lb_env env = {
10517             .sd = sd,
10518             .dst_cpu = target_cpu,
10519             .dst_rq = target_rq,
10520             .src_cpu = busiest_rq->cpu,
10521             .src_rq = busiest_rq,
10522             .idle = CPU_IDLE,
10523             .flags = 0,
10524             .loop = 0,
10525         };
10526         if (task_on_rq_queued(push_task) && push_task->state == TASK_RUNNING && task_cpu(push_task) == busiest_cpu &&
10527             cpu_online(target_cpu)) {
10528             update_rq_clock(busiest_rq);
10529             detach_task(push_task, &env);
10530             push_task_detached = 1;
10531         }
10532         goto out_unlock;
10533     }
10534 #endif
10535 
10536     /* Search for an sd spanning us and the target CPU. */
10537     rcu_read_lock();
10538     for_each_domain(target_cpu, sd)
10539     {
10540         if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) {
10541             break;
10542         }
10543     }
10544 
10545     if (likely(sd)) {
10546         struct lb_env env = {
10547             .sd = sd,
10548             .dst_cpu = target_cpu,
10549             .dst_rq = target_rq,
10550             .src_cpu = busiest_rq->cpu,
10551             .src_rq = busiest_rq,
10552             .idle = CPU_IDLE,
10553             /*
10554              * can_migrate_task() doesn't need to compute new_dst_cpu
10555              * for active balancing. Since we have CPU_IDLE, but no
10556              * @dst_grpmask we need to make that test go away with lying
10557              * about DST_PINNED.
10558              */
10559             .flags = LBF_DST_PINNED,
10560         };
10561 
10562         schedstat_inc(sd->alb_count);
10563         update_rq_clock(busiest_rq);
10564 
10565         p = detach_one_task(&env);
10566         if (p) {
10567             schedstat_inc(sd->alb_pushed);
10568             /* Active balancing done, reset the failure counter. */
10569             sd->nr_balance_failed = 0;
10570         } else {
10571             schedstat_inc(sd->alb_failed);
10572         }
10573     }
10574     rcu_read_unlock();
10575 out_unlock:
10576     busiest_rq->active_balance = 0;
10577 
10578 #ifdef CONFIG_SCHED_EAS
10579     push_task = busiest_rq->push_task;
10580     if (push_task) {
10581         busiest_rq->push_task = NULL;
10582     }
10583 #endif
10584     rq_unlock(busiest_rq, &rf);
10585 
10586 #ifdef CONFIG_SCHED_EAS
10587     if (push_task) {
10588         if (push_task_detached) {
10589             attach_one_task(target_rq, push_task);
10590         }
10591 
10592         put_task_struct(push_task);
10593     }
10594 #endif
10595 
10596     if (p) {
10597         attach_one_task(target_rq, p);
10598     }
10599 
10600     local_irq_enable();
10601 
10602     return 0;
10603 }
10604 
10605 static DEFINE_SPINLOCK(balancing);
10606 
10607 /*
10608  * Scale the max load_balance interval with the number of CPUs in the system.
10609  * This trades load-balance latency on larger machines for less cross talk.
10610  */
10611 void update_max_interval(void)
10612 {
10613     unsigned int available_cpus;
10614 #ifdef CONFIG_CPU_ISOLATION_OPT
10615     cpumask_t avail_mask;
10616 
10617     cpumask_andnot(&avail_mask, cpu_online_mask, cpu_isolated_mask);
10618     available_cpus = cpumask_weight(&avail_mask);
10619 #else
10620     available_cpus = num_online_cpus();
10621 #endif
10622 
10623     max_load_balance_interval = HZ * available_cpus / 0xa;
10624 }
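/*
 * Example: HZ = 250 with 8 available (online, non-isolated) CPUs yields
 * 250 * 8 / 10 = 200 jiffies, i.e. an 800ms cap on the load-balance
 * interval for this machine size.
 */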
10625 
10626 /*
10627  * It checks each scheduling domain to see if it is due to be balanced,
10628  * and initiates a balancing operation if so.
10629  *
10630  * Balancing parameters are set up in init_sched_domains.
10631  */
10632 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
10633 {
10634     int continue_balancing = 1;
10635     int cpu = rq->cpu;
10636     int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
10637     unsigned long interval;
10638     struct sched_domain *sd;
10639     /* Earliest time when we have to do rebalance again */
10640     unsigned long next_balance = jiffies + 60 * HZ;
10641     int update_next_balance = 0;
10642     int need_serialize, need_decay = 0;
10643     u64 max_cost = 0;
10644 
10645     rcu_read_lock();
10646     for_each_domain(cpu, sd)
10647     {
10648         /*
10649          * Decay the newidle max times here because this is a regular
10650          * visit to all the domains. Decay ~1% per second.
10651          */
10652         if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
10653             sd->max_newidle_lb_cost = (sd->max_newidle_lb_cost * FAIR_TWOHUNDREDFIFTYTHREE) / FAIR_TWOHUNDREDFIFTYSIX;
10654             sd->next_decay_max_lb_cost = jiffies + HZ;
10655             need_decay = 1;
10656         }
10657         max_cost += sd->max_newidle_lb_cost;
10658 
10659         /*
10660          * Stop the load balance at this level. There is another
10661          * CPU in our sched group which is doing load balancing more
10662          * actively.
10663          */
10664         if (!continue_balancing) {
10665             if (need_decay) {
10666                 continue;
10667             }
10668             break;
10669         }
10670 
10671         interval = get_sd_balance_interval(sd, busy);
10672 
10673         need_serialize = sd->flags & SD_SERIALIZE;
10674         if (need_serialize) {
10675             if (!spin_trylock(&balancing)) {
10676                 goto out;
10677             }
10678         }
10679 
10680         if (time_after_eq(jiffies, sd->last_balance + interval)) {
10681             if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
10682                 /*
10683                  * The LBF_DST_PINNED logic could have changed
10684                  * env->dst_cpu, so we can't know our idle
10685                  * state even if we migrated tasks. Update it.
10686                  */
10687                 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
10688                 busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
10689             }
10690             sd->last_balance = jiffies;
10691             interval = get_sd_balance_interval(sd, busy);
10692         }
10693         if (need_serialize) {
10694             spin_unlock(&balancing);
10695         }
10696     out:
10697         if (time_after(next_balance, sd->last_balance + interval)) {
10698             next_balance = sd->last_balance + interval;
10699             update_next_balance = 1;
10700         }
10701     }
10702     if (need_decay) {
10703         /*
10704          * Ensure the rq-wide value also decays but keep it at a
10705          * reasonable floor to avoid funnies with rq->avg_idle.
10706          */
10707         rq->max_idle_balance_cost = max((u64)sysctl_sched_migration_cost, max_cost);
10708     }
10709     rcu_read_unlock();
10710 
10711     /*
10712      * next_balance will be updated only when there is a need.
10713      * When the CPU is attached to a null domain, for example, it will not be
10714      * updated.
10715      */
10716     if (likely(update_next_balance)) {
10717         rq->next_balance = next_balance;
10718 
10719 #ifdef CONFIG_NO_HZ_COMMON
10720         /*
10721          * If this CPU has been elected to perform the nohz idle
10722          * balance, other idle CPUs have already rebalanced with
10723          * nohz_idle_balance() and nohz.next_balance has been
10724          * updated accordingly. This CPU is now running the idle load
10725          * balance for itself and we need to update the
10726          * nohz.next_balance accordingly.
10727          */
10728         if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance)) {
10729             nohz.next_balance = rq->next_balance;
10730         }
10731 #endif
10732     }
10733 }
10734 
10735 static inline int on_null_domain(struct rq *rq)
10736 {
10737     return unlikely(!rcu_dereference_sched(rq->sd));
10738 }
10739 
10740 #ifdef CONFIG_NO_HZ_COMMON
10741 /*
10742  * idle load balancing details
10743  * - When one of the busy CPUs notices that idle rebalancing may be
10744  *   needed, it will kick the idle load balancer, which then does idle
10745  *   load balancing for all the idle CPUs.
10746  * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED is not
10747  *   set anywhere yet.
10748  */
10749 
10750 static inline int find_new_ilb(void)
10751 {
10752     int ilb;
10753 
10754     for_each_cpu_and(ilb, nohz.idle_cpus_mask, housekeeping_cpumask(HK_FLAG_MISC))
10755     {
10756         if (cpu_isolated(ilb)) {
10757             continue;
10758         }
10759 
10760         if (idle_cpu(ilb)) {
10761             return ilb;
10762         }
10763     }
10764 
10765     return nr_cpu_ids;
10766 }
10767 
10768 /*
10769  * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
10770  * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
10771  */
10772 static void kick_ilb(unsigned int flags)
10773 {
10774     int ilb_cpu;
10775 
10776     /*
10777      * Increase nohz.next_balance only when a full ilb is triggered, but
10778      * not if we only update stats.
10779      */
10780     if (flags & NOHZ_BALANCE_KICK) {
10781         nohz.next_balance = jiffies + 1;
10782     }
10783 
10784     ilb_cpu = find_new_ilb();
10785     if (ilb_cpu >= nr_cpu_ids) {
10786         return;
10787     }
10788 
10789     /*
10790      * Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets
10791      * the first flag owns it; cleared by nohz_csd_func().
10792      */
10793     flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
10794     if (flags & NOHZ_KICK_MASK) {
10795         return;
10796     }
10797 
10798     /*
10799      * This way we generate an IPI on the target CPU which
10800      * is idle. And the softirq performing nohz idle load balance
10801      * will be run before returning from the IPI.
10802      */
10803     smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd);
10804 }
10805 
10806 /*
10807  * Current decision point for kicking the idle load balancer in the presence
10808  * of idle CPUs in the system.
10809  */
10810 static void nohz_balancer_kick(struct rq *rq)
10811 {
10812     unsigned long now = jiffies;
10813     struct sched_domain_shared *sds;
10814     struct sched_domain *sd;
10815     int nr_busy, i, cpu = rq->cpu;
10816     unsigned int flags = 0;
10817     cpumask_t cpumask;
10818 
10819     if (unlikely(rq->idle_balance)) {
10820         return;
10821     }
10822 
10823     /*
10824      * We may have recently been in ticked or tickless idle mode. At the first
10825      * busy tick after returning from idle, we will update the busy stats.
10826      */
10827     nohz_balance_exit_idle(rq);
10828 
10829     /*
10830      * None are in tickless mode and hence no need for NOHZ idle load
10831      * balancing.
10832      */
10833 #ifdef CONFIG_CPU_ISOLATION_OPT
10834     cpumask_andnot(&cpumask, nohz.idle_cpus_mask, cpu_isolated_mask);
10835     if (cpumask_empty(&cpumask)) {
10836         return;
10837     }
10838 #else
10839     cpumask_copy(&cpumask, nohz.idle_cpus_mask);
10840     if (likely(!atomic_read(&nohz.nr_cpus))) {
10841         return;
10842     }
10843 #endif
10844 
10845     if (READ_ONCE(nohz.has_blocked) && time_after(now, READ_ONCE(nohz.next_blocked))) {
10846         flags = NOHZ_STATS_KICK;
10847     }
10848 
10849     if (time_before(now, nohz.next_balance)) {
10850         goto out;
10851     }
10852 
10853     if (rq->nr_running >= 0x2) {
10854         flags = NOHZ_KICK_MASK;
10855         goto out;
10856     }
10857 
10858     rcu_read_lock();
10859 
10860     sd = rcu_dereference(rq->sd);
10861     if (sd) {
10862         /*
10863          * If there's a CFS task and the current CPU has reduced
10864          * capacity, kick the ILB to see if there's a better CPU to run
10865          * on.
10866          */
10867         if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
10868             flags = NOHZ_KICK_MASK;
10869             goto unlock;
10870         }
10871     }
10872 
10873     sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
10874     if (sd) {
10875         /*
10876          * When ASYM_PACKING, see if there's a more preferred CPU
10877          * currently idle; in which case, kick the ILB to move tasks
10878          * around.
10879          */
10880         for_each_cpu_and(i, sched_domain_span(sd), &cpumask)
10881         {
10882             if (sched_asym_prefer(i, cpu)) {
10883                 flags = NOHZ_KICK_MASK;
10884                 goto unlock;
10885             }
10886         }
10887     }
10888 
10889     sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
10890     if (sd) {
10891         /*
10892          * When ASYM_CPUCAPACITY, see if there's a higher capacity CPU
10893          * to run the misfit task on.
10894          */
10895         if (check_misfit_status(rq, sd)) {
10896             flags = NOHZ_KICK_MASK;
10897             goto unlock;
10898         }
10899 
10900         /*
10901          * For asymmetric systems, we do not want to nicely balance
10902          * cache use, instead we want to embrace asymmetry and only
10903          * ensure tasks have enough CPU capacity.
10904          *
10905          * Skip the LLC logic because it's not relevant in that case.
10906          */
10907         goto unlock;
10908     }
10909 
10910     sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
10911     if (sds) {
10912         /*
10913          * If there is an imbalance between LLC domains (IOW we could
10914          * increase the overall cache use), we need some less-loaded LLC
10915          * domain to pull some load. Likewise, we may need to spread
10916          * load within the current LLC domain (e.g. packed SMT cores but
10917          * other CPUs are idle). We can't really know from here how busy
10918          * the others are - so just get a nohz balance going if it looks
10919          * like this LLC domain has tasks we could move.
10920          */
10921         nr_busy = atomic_read(&sds->nr_busy_cpus);
10922         if (nr_busy > 1) {
10923             flags = NOHZ_KICK_MASK;
10924             goto unlock;
10925         }
10926     }
10927 unlock:
10928     rcu_read_unlock();
10929 out:
10930     if (flags) {
10931         kick_ilb(flags);
10932     }
10933 }
10934 
set_cpu_sd_state_busy(int cpu)10935 static void set_cpu_sd_state_busy(int cpu)
10936 {
10937     struct sched_domain *sd;
10938 
10939     rcu_read_lock();
10940     sd = rcu_dereference(per_cpu(sd_llc, cpu));
10941     if (!sd || !sd->nohz_idle) {
10942         goto unlock;
10943     }
10944     sd->nohz_idle = 0;
10945 
10946     atomic_inc(&sd->shared->nr_busy_cpus);
10947 unlock:
10948     rcu_read_unlock();
10949 }
10950 
nohz_balance_exit_idle(struct rq * rq)10951 void nohz_balance_exit_idle(struct rq *rq)
10952 {
10953     SCHED_WARN_ON(rq != this_rq());
10954 
10955     if (likely(!rq->nohz_tick_stopped)) {
10956         return;
10957     }
10958 
10959     rq->nohz_tick_stopped = 0;
10960     cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
10961     atomic_dec(&nohz.nr_cpus);
10962 
10963     set_cpu_sd_state_busy(rq->cpu);
10964 }
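
/*
 * Exiting NOHZ idle undoes everything nohz_balance_enter_idle() set up:
 * the CPU leaves nohz.idle_cpus_mask, nohz.nr_cpus is decremented and
 * the LLC domain is marked busy again, keeping nr_busy_cpus accurate
 * for the nohz_balancer_kick() heuristics above.
 */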

static void set_cpu_sd_state_idle(int cpu)
{
    struct sched_domain *sd;

    rcu_read_lock();
    sd = rcu_dereference(per_cpu(sd_llc, cpu));
    if (!sd || sd->nohz_idle) {
        goto unlock;
    }
    sd->nohz_idle = 1;

    atomic_dec(&sd->shared->nr_busy_cpus);
unlock:
    rcu_read_unlock();
}

/*
 * This routine will record that the CPU is going idle with tick stopped.
 * This info will be used in performing idle load balancing in the future.
 */
void nohz_balance_enter_idle(int cpu)
{
    struct rq *rq = cpu_rq(cpu);

    SCHED_WARN_ON(cpu != smp_processor_id());

    if (!cpu_active(cpu)) {
        /*
         * A CPU can be paused while it is idle with its tick
         * stopped. nohz_balance_exit_idle() should be called
         * from the local CPU, so it can't be called during
         * pause. This results in the paused CPU participating in
         * the nohz idle balance, which should be avoided.
         *
         * When the paused CPU exits idle and enters again,
         * exempt the paused CPU from nohz_balance_exit_idle.
         */
        nohz_balance_exit_idle(rq);
        return;
    }

    /* Spare idle load balancing on CPUs that don't want to be disturbed: */
    if (!housekeeping_cpu(cpu, HK_FLAG_SCHED)) {
        return;
    }

    /*
     * Can be set safely without rq->lock held.
     * If a clear happens, it will have seen the last additions because
     * rq->lock is held during the check and the clear.
     */
    rq->has_blocked_load = 1;

    /*
     * The tick is still stopped but load could have been added in the
     * meantime. We set the nohz.has_blocked flag to trigger a check of
     * the *_avg. The CPU is already part of nohz.idle_cpus_mask, so the
     * clear of nohz.has_blocked can only happen after checking the new load.
     */
    if (rq->nohz_tick_stopped) {
        goto out;
    }

    /* If we're a completely isolated CPU, we don't play: */
    if (on_null_domain(rq)) {
        return;
    }

    rq->nohz_tick_stopped = 1;

    cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
    atomic_inc(&nohz.nr_cpus);

    /*
     * Ensures that if nohz_idle_balance() fails to observe our
     * @idle_cpus_mask store, it must observe the @has_blocked
     * store.
     */
    smp_mb__after_atomic();

    set_cpu_sd_state_idle(cpu);

out:
    /*
     * Each time a CPU enters idle, we assume that it has blocked load and
     * enable the periodic update of the load of idle CPUs.
     */
    WRITE_ONCE(nohz.has_blocked, 1);
}
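
/*
 * Called from the NOHZ tick-stop path in the timekeeping code (the
 * tick_nohz_idle_stop_tick() chain). While nohz_tick_stopped stays set,
 * repeated calls only refresh the has_blocked hint.
 */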

/*
 * Internal function that runs load balance for all idle cpus. The load balance
 * can be a simple update of blocked load or a complete load balance with
 * tasks movement, depending on the flags.
 * The function returns false if the loop has stopped before running
 * through all idle CPUs.
 */
static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, enum cpu_idle_type idle)
{
    /* Earliest time when we have to do rebalance again */
    unsigned long now = jiffies;
    unsigned long next_balance = now + 60 * HZ;
    bool has_blocked_load = false;
    int update_next_balance = 0;
    int this_cpu = this_rq->cpu;
    int balance_cpu;
    int ret = false;
    struct rq *rq;
    cpumask_t cpus;

    SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK);

    /*
     * We assume there will be no idle load after this update and clear
     * the has_blocked flag. If a cpu enters idle in the meantime, it will
     * set the has_blocked flag and trigger another update of idle load.
     * Because a cpu that becomes idle is added to idle_cpus_mask before
     * setting the flag, we are sure to not clear the state and not
     * check the load of an idle cpu.
     */
    WRITE_ONCE(nohz.has_blocked, 0);

    /*
     * Ensures that if we miss the CPU, we must see the has_blocked
     * store from nohz_balance_enter_idle().
     */
    smp_mb();

#ifdef CONFIG_CPU_ISOLATION_OPT
    cpumask_andnot(&cpus, nohz.idle_cpus_mask, cpu_isolated_mask);
#else
    cpumask_copy(&cpus, nohz.idle_cpus_mask);
#endif

    for_each_cpu(balance_cpu, &cpus) {
        if (balance_cpu == this_cpu || !idle_cpu(balance_cpu)) {
            continue;
        }

        /*
         * If this CPU gets work to do, stop the load balancing
         * work being done for other CPUs. Next load
         * balancing owner will pick it up.
         */
        if (need_resched()) {
            has_blocked_load = true;
            goto abort;
        }

        rq = cpu_rq(balance_cpu);

        has_blocked_load |= update_nohz_stats(rq, true);

        /*
         * If time for next balance is due,
         * do the balance.
         */
        if (time_after_eq(jiffies, rq->next_balance)) {
            struct rq_flags rf;

            rq_lock_irqsave(rq, &rf);
            update_rq_clock(rq);
            rq_unlock_irqrestore(rq, &rf);

            if (flags & NOHZ_BALANCE_KICK) {
                rebalance_domains(rq, CPU_IDLE);
            }
        }

        if (time_after(next_balance, rq->next_balance)) {
            next_balance = rq->next_balance;
            update_next_balance = 1;
        }
    }

    /*
     * next_balance will be updated only when there is a need.
     * When the CPU is attached to null domain for ex, it will not be
     * updated.
     */
    if (likely(update_next_balance)) {
        nohz.next_balance = next_balance;
    }

    /* Newly idle CPU doesn't need an update */
    if (idle != CPU_NEWLY_IDLE) {
        update_blocked_averages(this_cpu);
        has_blocked_load |= this_rq->has_blocked_load;
    }

    if (flags & NOHZ_BALANCE_KICK) {
        rebalance_domains(this_rq, CPU_IDLE);
    }

    WRITE_ONCE(nohz.next_blocked, now + msecs_to_jiffies(LOAD_AVG_PERIOD));

    /* The full idle balance loop has been done */
    ret = true;

abort:
    /* There is still blocked load, enable periodic update */
    if (has_blocked_load) {
        WRITE_ONCE(nohz.has_blocked, 1);
    }

    return ret;
}

/*
 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
 * rebalancing for all the cpus for whom scheduler ticks are stopped.
 */
static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
{
    unsigned int flags = this_rq->nohz_idle_balance;

    if (!flags) {
        return false;
    }

    this_rq->nohz_idle_balance = 0;

    if (idle != CPU_IDLE) {
        return false;
    }

    _nohz_idle_balance(this_rq, flags, idle);

    return true;
}
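
/*
 * rq->nohz_idle_balance is expected to be stashed by nohz_csd_func()
 * when the IPI sent from kick_ilb() arrives, and is consumed exactly
 * once here from the SCHED_SOFTIRQ handler; if the CPU is no longer
 * CPU_IDLE by then, the request is simply dropped.
 */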

static void nohz_newidle_balance(struct rq *this_rq)
{
    int this_cpu = this_rq->cpu;

    /*
     * This CPU doesn't want to be disturbed by scheduler
     * housekeeping
     */
    if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED)) {
        return;
    }

    /* Will wake up very soon. No time for doing anything else */
    if (this_rq->avg_idle < sysctl_sched_migration_cost) {
        return;
    }

    /* Don't need to update blocked load of idle CPUs */
    if (!READ_ONCE(nohz.has_blocked) || time_before(jiffies, READ_ONCE(nohz.next_blocked))) {
        return;
    }

    raw_spin_unlock(&this_rq->lock);
    /*
     * This CPU is going to be idle and blocked load of idle CPUs
     * need to be updated. Run the ilb locally as it is a good
     * candidate for ilb instead of waking up another idle CPU.
     * Kick a normal ilb if we failed to do the update.
     */
    if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE)) {
        kick_ilb(NOHZ_STATS_KICK);
    }
    raw_spin_lock(&this_rq->lock);
}
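
/*
 * this_rq->lock is dropped around _nohz_idle_balance() above because
 * refreshing the blocked load of other idle CPUs takes their rq locks,
 * which we must not acquire while holding our own; the caller
 * (newidle_balance()) has already unpinned the lock at this point.
 */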

#else  /* !CONFIG_NO_HZ_COMMON */
static inline void nohz_balancer_kick(struct rq *rq)
{
}

static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
{
    return false;
}

static inline void nohz_newidle_balance(struct rq *this_rq)
{
}
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * newidle_balance is called by schedule() if this_cpu is about to become
 * idle. Attempts to pull tasks from other CPUs.
 *
 * Returns:
 *   < 0 - we released the lock and there are !fair tasks present
 *     0 - failed, no new tasks
 *   > 0 - success, new (fair) tasks present
 */
static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
{
    unsigned long next_balance = jiffies + HZ;
    int this_cpu = this_rq->cpu;
    struct sched_domain *sd;
    int pulled_task = 0;
    u64 curr_cost = 0;

    if (cpu_isolated(this_cpu)) {
        return 0;
    }

    update_misfit_status(NULL, this_rq);
    /*
     * We must set idle_stamp _before_ calling idle_balance(), such that we
     * measure the duration of idle_balance() as idle time.
     */
    this_rq->idle_stamp = rq_clock(this_rq);

    /*
     * Do not pull tasks towards !active CPUs...
     */
    if (!cpu_active(this_cpu)) {
        return 0;
    }

    /*
     * This is OK, because current is on_cpu, which avoids it being picked
     * for load-balance and preemption/IRQs are still disabled avoiding
     * further scheduler activity on it and we're being very careful to
     * re-start the picking loop.
     */
    rq_unpin_lock(this_rq, rf);

    if (this_rq->avg_idle < sysctl_sched_migration_cost || !READ_ONCE(this_rq->rd->overload)) {
        rcu_read_lock();
        sd = rcu_dereference_check_sched_domain(this_rq->sd);
        if (sd) {
            update_next_balance(sd, &next_balance);
        }
        rcu_read_unlock();

        nohz_newidle_balance(this_rq);

        goto out;
    }

    raw_spin_unlock(&this_rq->lock);

    update_blocked_averages(this_cpu);
    rcu_read_lock();
    for_each_domain(this_cpu, sd) {
        int continue_balancing = 1;
        u64 t0, domain_cost;

        if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
            update_next_balance(sd, &next_balance);
            break;
        }

        if (sd->flags & SD_BALANCE_NEWIDLE) {
            t0 = sched_clock_cpu(this_cpu);

            pulled_task = load_balance(this_cpu, this_rq, sd, CPU_NEWLY_IDLE, &continue_balancing);

            domain_cost = sched_clock_cpu(this_cpu) - t0;
            if (domain_cost > sd->max_newidle_lb_cost) {
                sd->max_newidle_lb_cost = domain_cost;
            }

            curr_cost += domain_cost;
        }

        update_next_balance(sd, &next_balance);

        /*
         * Stop searching for tasks to pull if there are
         * now runnable tasks on this rq.
         */
        if (pulled_task || this_rq->nr_running > 0) {
            break;
        }
    }
    rcu_read_unlock();

    raw_spin_lock(&this_rq->lock);

    if (curr_cost > this_rq->max_idle_balance_cost) {
        this_rq->max_idle_balance_cost = curr_cost;
    }

out:
    /*
     * While browsing the domains, we released the rq lock, a task could
     * have been enqueued in the meantime. Since we're not going idle,
     * pretend we pulled a task.
     */
    if (this_rq->cfs.h_nr_running && !pulled_task) {
        pulled_task = 1;
    }

    /* Move the next balance forward */
    if (time_after(this_rq->next_balance, next_balance)) {
        this_rq->next_balance = next_balance;
    }

    /* Is there a task of a high priority class? */
    if (this_rq->nr_running != this_rq->cfs.h_nr_running) {
        pulled_task = -1;
    }

    if (pulled_task) {
        this_rq->idle_stamp = 0;
    }

    rq_repin_lock(this_rq, rf);

    return pulled_task;
}
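
/*
 * The return value feeds the pick_next_task() retry logic: a negative
 * value means the lock was dropped and a higher-class (RT/DL) task
 * appeared, so the class walk must restart; zero means this CPU is
 * genuinely going idle; a positive value means fair tasks are now
 * runnable.
 */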

/*
 * run_rebalance_domains is triggered when needed from the scheduler tick.
 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
 */
static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
{
    struct rq *this_rq = this_rq();
    enum cpu_idle_type idle = this_rq->idle_balance ? CPU_IDLE : CPU_NOT_IDLE;

    /*
     * Since core isolation doesn't update nohz.idle_cpus_mask, there
     * is a possibility this nohz kicked cpu could be isolated. Hence
     * return if the cpu is isolated.
     */
    if (cpu_isolated(this_rq->cpu)) {
        return;
    }

    /*
     * If this CPU has a pending nohz_balance_kick, then do the
     * balancing on behalf of the other idle CPUs whose ticks are
     * stopped. Do nohz_idle_balance *before* rebalance_domains to
     * give the idle CPUs a chance to load balance. Else we may
     * load balance only within the local sched_domain hierarchy
     * and abort nohz_idle_balance altogether if we pull some load.
     */
    if (nohz_idle_balance(this_rq, idle)) {
        return;
    }

    /* normal load balance */
    update_blocked_averages(this_rq->cpu);
    rebalance_domains(this_rq, idle);
}

/*
 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
 */
void trigger_load_balance(struct rq *rq)
{
    /*
     * Don't need to rebalance while attached to NULL domain or
     * cpu is isolated.
     */
    if (unlikely(on_null_domain(rq)) || cpu_isolated(cpu_of(rq))) {
        return;
    }

    if (time_after_eq(jiffies, rq->next_balance)) {
        raise_softirq(SCHED_SOFTIRQ);
    }

    nohz_balancer_kick(rq);
}
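
/*
 * trigger_load_balance() runs from scheduler_tick() in hard-irq
 * context; the heavy lifting is deferred to SCHED_SOFTIRQ
 * (run_rebalance_domains() above) or to the nohz kick machinery.
 */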

static void rq_online_fair(struct rq *rq)
{
    update_sysctl();

    update_runtime_enabled(rq);
}

static void rq_offline_fair(struct rq *rq)
{
    update_sysctl();

    /* Ensure any throttled groups are reachable by pick_next_task */
    unthrottle_offline_cfs_rqs(rq);
}

#ifdef CONFIG_SCHED_EAS
static inline int kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
{
    unsigned long flags;
    int rc = 0;

    if (cpu_of(rq) == new_cpu) {
        return rc;
    }

    /* Invoke active balance to force migrate currently running task */
    raw_spin_lock_irqsave(&rq->lock, flags);
    if (!rq->active_balance) {
        rq->active_balance = 1;
        rq->push_cpu = new_cpu;
        get_task_struct(p);
        rq->push_task = p;
        rc = 1;
    }
    raw_spin_unlock_irqrestore(&rq->lock, flags);
    return rc;
}

DEFINE_RAW_SPINLOCK(migration_lock);
static void check_for_migration_fair(struct rq *rq, struct task_struct *p)
{
    int active_balance;
    int new_cpu = -1;
    int prev_cpu = task_cpu(p);
    int ret;

#ifdef CONFIG_SCHED_RTG
    bool need_down_migrate = false;
    struct cpumask *rtg_target = find_rtg_target(p);

    if (rtg_target && (capacity_orig_of(prev_cpu) > capacity_orig_of(cpumask_first(rtg_target)))) {
        need_down_migrate = true;
    }
#endif

    if (rq->misfit_task_load) {
        if (rq->curr->state != TASK_RUNNING || rq->curr->nr_cpus_allowed == 1) {
            return;
        }

        raw_spin_lock(&migration_lock);
#ifdef CONFIG_SCHED_RTG
        if (rtg_target) {
            new_cpu = find_rtg_cpu(p);
            if (new_cpu != -1 && need_down_migrate && cpumask_test_cpu(new_cpu, rtg_target) && idle_cpu(new_cpu)) {
                goto do_active_balance;
            }

            if (new_cpu != -1 && capacity_orig_of(new_cpu) > capacity_orig_of(prev_cpu)) {
                goto do_active_balance;
            }

            goto out_unlock;
        }
#endif
        rcu_read_lock();
        new_cpu = find_energy_efficient_cpu(p, prev_cpu);
        rcu_read_unlock();

        if (new_cpu == -1 || capacity_orig_of(new_cpu) <= capacity_orig_of(prev_cpu)) {
            goto out_unlock;
        }
#ifdef CONFIG_SCHED_RTG
    do_active_balance:
#endif
        active_balance = kick_active_balance(rq, p, new_cpu);
        if (active_balance) {
            mark_reserved(new_cpu);
            raw_spin_unlock(&migration_lock);
            ret = stop_one_cpu_nowait(prev_cpu, active_load_balance_cpu_stop, rq, &rq->active_balance_work);
            if (!ret) {
                clear_reserved(new_cpu);
            } else {
                wake_up_if_idle(new_cpu);
            }
            return;
        }
    out_unlock:
        raw_spin_unlock(&migration_lock);
    }
}
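
/*
 * check_for_migration_fair() is wired up through
 * sched_class::check_for_migration below; in this tree it appears to be
 * invoked from the tick path so that a misfit task can be actively
 * pushed to a higher-capacity (or RTG-preferred) CPU without waiting
 * for the next periodic balance.
 */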
#endif /* CONFIG_SCHED_EAS */
#endif /* CONFIG_SMP */

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
    struct cfs_rq *cfs_rq;
    struct sched_entity *se = &curr->se;

    for_each_sched_entity(se) {
        cfs_rq = cfs_rq_of(se);
        entity_tick(cfs_rq, se, queued);
    }

    if (static_branch_unlikely(&sched_numa_balancing)) {
        task_tick_numa(rq, curr);
    }

    update_misfit_status(curr, rq);
    update_overutilized_status(task_rq(curr));
}

/*
 * called on fork with the child task as argument from the parent's context
 *  - child not yet on the tasklist
 *  - preemption disabled
 */
static void task_fork_fair(struct task_struct *p)
{
    struct cfs_rq *cfs_rq;
    struct sched_entity *se = &p->se, *curr;
    struct rq *rq = this_rq();
    struct rq_flags rf;

    rq_lock(rq, &rf);
    update_rq_clock(rq);

    cfs_rq = task_cfs_rq(current);
    curr = cfs_rq->curr;
    if (curr) {
        update_curr(cfs_rq);
        se->vruntime = curr->vruntime;
    }
    place_entity(cfs_rq, se, 1);

    if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
        /*
         * Upon rescheduling, sched_class::put_prev_task() will place
         * 'current' within the tree based on its new key value.
         */
        swap(curr->vruntime, se->vruntime);
        resched_curr(rq);
    }

    se->vruntime -= cfs_rq->min_vruntime;
    rq_unlock(rq, &rf);
}
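
/*
 * The child leaves here with a relative vruntime (min_vruntime
 * subtracted) because wake_up_new_task() may place it on a different
 * CPU; enqueue_entity() adds the destination runqueue's min_vruntime
 * back in.
 */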

/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
{
    if (!task_on_rq_queued(p)) {
        return;
    }

    if (rq->cfs.nr_running == 1) {
        return;
    }

    /*
     * Reschedule if we are currently running on this runqueue and
     * our priority decreased, or if we are not currently running on
     * this runqueue and our priority is higher than the current's
     */
    if (rq->curr == p) {
        if (p->prio > oldprio) {
            resched_curr(rq);
        }
    } else {
        check_preempt_curr(rq, p, 0);
    }
}

static inline bool vruntime_normalized(struct task_struct *p)
{
    struct sched_entity *se = &p->se;

    /*
     * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
     * the dequeue_entity(.flags=0) will already have normalized the
     * vruntime.
     */
    if (p->on_rq) {
        return true;
    }

    /*
     * When !on_rq, vruntime of the task has usually NOT been normalized.
     * But there are some cases where it has already been normalized:
     *
     * - A forked child which is waiting for being woken up by
     *   wake_up_new_task().
     * - A task which has been woken up by try_to_wake_up() and
     *   waiting for actually being woken up by sched_ttwu_pending().
     */
    if (!se->sum_exec_runtime || (p->state == TASK_WAKING && p->sched_remote_wakeup)) {
        return true;
    }

    return false;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * Propagate the changes of the sched_entity across the tg tree to make it
 * visible to the root
 */
static void propagate_entity_cfs_rq(struct sched_entity *se)
{
    struct cfs_rq *cfs_rq;

    list_add_leaf_cfs_rq(cfs_rq_of(se));

    /* Start to propagate at parent */
    se = se->parent;

    for_each_sched_entity(se) {
        cfs_rq = cfs_rq_of(se);
        if (!cfs_rq_throttled(cfs_rq)) {
            update_load_avg(cfs_rq, se, UPDATE_TG);
            list_add_leaf_cfs_rq(cfs_rq);
            continue;
        }

        if (list_add_leaf_cfs_rq(cfs_rq)) {
            break;
        }
    }
}
#else
static void propagate_entity_cfs_rq(struct sched_entity *se)
{
}
#endif

static void detach_entity_cfs_rq(struct sched_entity *se)
{
    struct cfs_rq *cfs_rq = cfs_rq_of(se);

    /* Catch up with the cfs_rq and remove our load when we leave */
    update_load_avg(cfs_rq, se, 0);
    detach_entity_load_avg(cfs_rq, se);
    update_tg_load_avg(cfs_rq);
    propagate_entity_cfs_rq(se);
}

static void attach_entity_cfs_rq(struct sched_entity *se)
{
    struct cfs_rq *cfs_rq = cfs_rq_of(se);

#ifdef CONFIG_FAIR_GROUP_SCHED
    /*
     * Since the real depth could have been changed (only the FAIR
     * class maintains a depth value), reset it properly.
     */
    se->depth = se->parent ? se->parent->depth + 1 : 0;
#endif

    /* Synchronize entity with its cfs_rq */
    update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
    attach_entity_load_avg(cfs_rq, se);
    update_tg_load_avg(cfs_rq);
    propagate_entity_cfs_rq(se);
}

static void detach_task_cfs_rq(struct task_struct *p)
{
    struct sched_entity *se = &p->se;
    struct cfs_rq *cfs_rq = cfs_rq_of(se);

    if (!vruntime_normalized(p)) {
        /*
         * Fix up our vruntime so that the current sleep doesn't
         * cause 'unlimited' sleep bonus.
         */
        place_entity(cfs_rq, se, 0);
        se->vruntime -= cfs_rq->min_vruntime;
    }

    detach_entity_cfs_rq(se);
}

static void attach_task_cfs_rq(struct task_struct *p)
{
    struct sched_entity *se = &p->se;
    struct cfs_rq *cfs_rq = cfs_rq_of(se);

    attach_entity_cfs_rq(se);

    if (!vruntime_normalized(p)) {
        se->vruntime += cfs_rq->min_vruntime;
    }
}

static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
    detach_task_cfs_rq(p);
}

static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
    attach_task_cfs_rq(p);

    if (task_on_rq_queued(p)) {
        /*
         * We were most likely switched from sched_rt, so
         * kick off the schedule if running, otherwise just see
         * if we can still preempt the current task.
         */
        if (rq->curr == p) {
            resched_curr(rq);
        } else {
            check_preempt_curr(rq, p, 0);
        }
    }
}

/*
 * Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
{
    struct sched_entity *se = &p->se;

#ifdef CONFIG_SMP
    if (task_on_rq_queued(p)) {
        /*
         * Move the next running task to the front of the list, so our
         * cfs_tasks list becomes MRU one.
         */
        list_move(&se->group_node, &rq->cfs_tasks);
    }
#endif

    for_each_sched_entity(se) {
        struct cfs_rq *cfs_rq = cfs_rq_of(se);

        set_next_entity(cfs_rq, se);
        /* ensure bandwidth has been allocated on our new cfs_rq */
        account_cfs_rq_runtime(cfs_rq, 0);
    }
}

void init_cfs_rq(struct cfs_rq *cfs_rq)
{
    cfs_rq->tasks_timeline = RB_ROOT_CACHED;
    cfs_rq->min_vruntime = (u64)(-(1LL << FAIR_TWENTY));
#ifndef CONFIG_64BIT
    cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
#ifdef CONFIG_SMP
    raw_spin_lock_init(&cfs_rq->removed.lock);
#endif
}
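
/*
 * min_vruntime starts at -(1 << FAIR_TWENTY) (about -1ms at nanosecond
 * resolution) rather than at 0, presumably so that the unsigned
 * wrap-around handling in the vruntime comparisons is exercised soon
 * after boot instead of only after the counter overflows.
 */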

#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_set_group_fair(struct task_struct *p)
{
    struct sched_entity *se = &p->se;

    set_task_rq(p, task_cpu(p));
    se->depth = se->parent ? se->parent->depth + 1 : 0;
}

static void task_move_group_fair(struct task_struct *p)
{
    detach_task_cfs_rq(p);
    set_task_rq(p, task_cpu(p));

#ifdef CONFIG_SMP
    /* Tell se's cfs_rq has been changed -- migrated */
    p->se.avg.last_update_time = 0;
#endif
    attach_task_cfs_rq(p);
}

static void task_change_group_fair(struct task_struct *p, int type)
{
    switch (type) {
        case TASK_SET_GROUP:
            task_set_group_fair(p);
            break;

        case TASK_MOVE_GROUP:
            task_move_group_fair(p);
            break;
    }
}

void free_fair_sched_group(struct task_group *tg)
{
    int i;

    destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));

    for_each_possible_cpu(i) {
        if (tg->cfs_rq) {
            kfree(tg->cfs_rq[i]);
        }
        if (tg->se) {
            kfree(tg->se[i]);
        }
    }

    kfree(tg->cfs_rq);
    kfree(tg->se);
}

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
    struct sched_entity *se;
    struct cfs_rq *cfs_rq;
    int i;

    tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
    if (!tg->cfs_rq) {
        goto err;
    }
    tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
    if (!tg->se) {
        goto err;
    }

    tg->shares = NICE_0_LOAD;

    init_cfs_bandwidth(tg_cfs_bandwidth(tg));

    for_each_possible_cpu(i) {
        cfs_rq = kzalloc_node(sizeof(struct cfs_rq), GFP_KERNEL, cpu_to_node(i));
        if (!cfs_rq) {
            goto err;
        }

        se = kzalloc_node(sizeof(struct sched_entity), GFP_KERNEL, cpu_to_node(i));
        if (!se) {
            goto err_free_rq;
        }

        init_cfs_rq(cfs_rq);
        init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
        init_entity_runnable_average(se);
    }

    return 1;

err_free_rq:
    kfree(cfs_rq);
err:
    return 0;
}

void online_fair_sched_group(struct task_group *tg)
{
    struct sched_entity *se;
    struct rq_flags rf;
    struct rq *rq;
    int i;

    for_each_possible_cpu(i) {
        rq = cpu_rq(i);
        se = tg->se[i];
        rq_lock_irq(rq, &rf);
        update_rq_clock(rq);
        attach_entity_cfs_rq(se);
        sync_throttle(tg, i);
        rq_unlock_irq(rq, &rf);
    }
}

void unregister_fair_sched_group(struct task_group *tg)
{
    unsigned long flags;
    struct rq *rq;
    int cpu;

    for_each_possible_cpu(cpu) {
        if (tg->se[cpu]) {
            remove_entity_load_avg(tg->se[cpu]);
        }

        /*
         * Only empty task groups can be destroyed; so we can speculatively
         * check on_list without danger of it being re-added.
         */
        if (!tg->cfs_rq[cpu]->on_list) {
            continue;
        }

        rq = cpu_rq(cpu);

        raw_spin_lock_irqsave(&rq->lock, flags);
        list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
    }
}

void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu,
                       struct sched_entity *parent)
{
    struct rq *rq = cpu_rq(cpu);

    cfs_rq->tg = tg;
    cfs_rq->rq = rq;
    init_cfs_rq_runtime(cfs_rq);

    tg->cfs_rq[cpu] = cfs_rq;
    tg->se[cpu] = se;

    /* se could be NULL for root_task_group */
    if (!se) {
        return;
    }

    if (!parent) {
        se->cfs_rq = &rq->cfs;
        se->depth = 0;
    } else {
        se->cfs_rq = parent->my_q;
        se->depth = parent->depth + 1;
    }

    se->my_q = cfs_rq;
    /* guarantee group entities always have weight */
    update_load_set(&se->load, NICE_0_LOAD);
    se->parent = parent;
}

static DEFINE_MUTEX(shares_mutex);

int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
    int i;

    /*
     * We can't change the weight of the root cgroup.
     */
    if (!tg->se[0]) {
        return -EINVAL;
    }

    shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));

    mutex_lock(&shares_mutex);
    if (tg->shares == shares) {
        goto done;
    }

    tg->shares = shares;
    for_each_possible_cpu(i) {
        struct rq *rq = cpu_rq(i);
        struct sched_entity *se = tg->se[i];
        struct rq_flags rf;

        /* Propagate contribution to hierarchy */
        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
        for_each_sched_entity(se) {
            update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
            update_cfs_group(se);
        }
        rq_unlock_irqrestore(rq, &rf);
    }

done:
    mutex_unlock(&shares_mutex);
    return 0;
}
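
/*
 * Usage sketch: the cgroup v1 cpu controller lands here when
 * "cpu.shares" is written, e.g.
 *
 *	echo 2048 > /sys/fs/cgroup/cpu/<group>/cpu.shares
 *
 * which boils down to sched_group_set_shares(tg, scale_load(2048)).
 */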
#else /* CONFIG_FAIR_GROUP_SCHED */

void free_fair_sched_group(struct task_group *tg)
{
}

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
    return 1;
}

void online_fair_sched_group(struct task_group *tg)
{
}

void unregister_fair_sched_group(struct task_group *tg)
{
}

#endif /* CONFIG_FAIR_GROUP_SCHED */

static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
    struct sched_entity *se = &task->se;
    unsigned int rr_interval = 0;

    /*
     * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
     * idle runqueue:
     */
    if (rq->cfs.load.weight) {
        rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
    }

    return rr_interval;
}

/*
 * All the scheduling class methods:
 */
const struct sched_class fair_sched_class __section("__fair_sched_class") = {
    .enqueue_task = enqueue_task_fair,
    .dequeue_task = dequeue_task_fair,
    .yield_task = yield_task_fair,
    .yield_to_task = yield_to_task_fair,

    .check_preempt_curr = check_preempt_wakeup,

    .pick_next_task = fair_pick_next_task_fair,
    .put_prev_task = put_prev_task_fair,
    .set_next_task = set_next_task_fair,

#ifdef CONFIG_SMP
    .balance = balance_fair,
    .select_task_rq = select_task_rq_fair,
    .migrate_task_rq = migrate_task_rq_fair,

    .rq_online = rq_online_fair,
    .rq_offline = rq_offline_fair,

    .task_dead = task_dead_fair,
    .set_cpus_allowed = set_cpus_allowed_common,
#endif

    .task_tick = task_tick_fair,
    .task_fork = task_fork_fair,

    .prio_changed = prio_changed_fair,
    .switched_from = switched_from_fair,
    .switched_to = switched_to_fair,

    .get_rr_interval = get_rr_interval_fair,

    .update_curr = update_curr_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
    .task_change_group = task_change_group_fair,
#endif

#ifdef CONFIG_UCLAMP_TASK
    .uclamp_enabled = 1,
#endif
#ifdef CONFIG_SCHED_WALT
    .fixup_walt_sched_stats = walt_fixup_sched_stats_fair,
#endif
#ifdef CONFIG_SCHED_EAS
    .check_for_migration = check_for_migration_fair,
#endif
};

#ifdef CONFIG_SCHED_DEBUG
void print_cfs_stats(struct seq_file *m, int cpu)
{
    struct cfs_rq *cfs_rq, *pos;

    rcu_read_lock();
    for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) {
        print_cfs_rq(m, cpu, cfs_rq);
    }
    rcu_read_unlock();
}

#ifdef CONFIG_NUMA_BALANCING
void show_numa_stats(struct task_struct *p, struct seq_file *m)
{
    int node;
    unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
    struct numa_group *ng;

    rcu_read_lock();
    ng = rcu_dereference(p->numa_group);
    for_each_online_node(node) {
        if (p->numa_faults) {
            tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
            tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
        }
        if (ng) {
            gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
            gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
        }
        print_numa_stats(m, node, tsf, tpf, gsf, gpf);
    }
    rcu_read_unlock();
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
    open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);

#ifdef CONFIG_NO_HZ_COMMON
    nohz.next_balance = jiffies;
    nohz.next_blocked = jiffies;
    zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
#endif
#endif /* SMP */
}

/* WALT sched implementation begins here */
#ifdef CONFIG_SCHED_WALT

#ifdef CONFIG_CFS_BANDWIDTH

static void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq)
{
    cfs_rq->walt_stats.cumulative_runnable_avg_scaled = 0;
}

static void walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p)
{
    fixup_cumulative_runnable_avg(&cfs_rq->walt_stats, p->ravg.demand_scaled);
}

static void walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p)
{
    fixup_cumulative_runnable_avg(&cfs_rq->walt_stats, -(s64)p->ravg.demand_scaled);
}

static void walt_inc_throttled_cfs_rq_stats(struct walt_sched_stats *stats, struct cfs_rq *tcfs_rq)
{
    struct rq *rq = rq_of(tcfs_rq);

    fixup_cumulative_runnable_avg(stats, tcfs_rq->walt_stats.cumulative_runnable_avg_scaled);

    if (stats == &rq->walt_stats) {
        walt_fixup_cum_window_demand(rq, tcfs_rq->walt_stats.cumulative_runnable_avg_scaled);
    }
}

static void walt_dec_throttled_cfs_rq_stats(struct walt_sched_stats *stats, struct cfs_rq *tcfs_rq)
{
    struct rq *rq = rq_of(tcfs_rq);

    fixup_cumulative_runnable_avg(stats, -tcfs_rq->walt_stats.cumulative_runnable_avg_scaled);

    /*
     * We remove the throttled cfs_rq's tasks' contribution from the
     * cumulative window demand so that the same can be added
     * unconditionally when the cfs_rq is unthrottled.
     */
    if (stats == &rq->walt_stats) {
        walt_fixup_cum_window_demand(rq, -tcfs_rq->walt_stats.cumulative_runnable_avg_scaled);
    }
}

static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p, u16 updated_demand_scaled)
{
    struct cfs_rq *cfs_rq;
    struct sched_entity *se = &p->se;
    s64 task_load_delta = (s64)updated_demand_scaled - p->ravg.demand_scaled;

    for_each_sched_entity(se) {
        cfs_rq = cfs_rq_of(se);

        fixup_cumulative_runnable_avg(&cfs_rq->walt_stats, task_load_delta);
        if (cfs_rq_throttled(cfs_rq)) {
            break;
        }
    }

    /* Fix up rq->walt_stats only if we didn't find any throttled cfs_rq */
    if (!se) {
        fixup_cumulative_runnable_avg(&rq->walt_stats, task_load_delta);
        walt_fixup_cum_window_demand(rq, task_load_delta);
    }
}
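
/*
 * for_each_sched_entity() leaves se == NULL only when the walk reached
 * the root without meeting a throttled cfs_rq; in that case the task's
 * demand delta is visible at the rq level too, so rq->walt_stats and
 * the cumulative window demand are fixed up here as well.
 */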

#else  /* CONFIG_CFS_BANDWIDTH */
static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p, u16 updated_demand_scaled)
{
    fixup_walt_sched_stats_common(rq, p, updated_demand_scaled);
}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_SCHED_WALT */

/*
 * Helper functions to facilitate extracting info from tracepoints.
 */

const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq)
{
#ifdef CONFIG_SMP
    return cfs_rq ? &cfs_rq->avg : NULL;
#else
    return NULL;
#endif
}
EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_avg);

char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len)
{
    if (!cfs_rq) {
        if (str) {
            strlcpy(str, "(null)", len);
        } else {
            return NULL;
        }
    }

    cfs_rq_tg_path(cfs_rq, str, len);
    return str;
}
EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_path);

int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq)
{
    return cfs_rq ? cpu_of(rq_of(cfs_rq)) : -1;
}
EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_cpu);

const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq)
{
#ifdef CONFIG_SMP
    return rq ? &rq->avg_rt : NULL;
#else
    return NULL;
#endif
}
EXPORT_SYMBOL_GPL(sched_trace_rq_avg_rt);

const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq)
{
#ifdef CONFIG_SMP
    return rq ? &rq->avg_dl : NULL;
#else
    return NULL;
#endif
}
EXPORT_SYMBOL_GPL(sched_trace_rq_avg_dl);

const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq)
{
#if defined(CONFIG_SMP) && defined(CONFIG_HAVE_SCHED_AVG_IRQ)
    return rq ? &rq->avg_irq : NULL;
#else
    return NULL;
#endif
}
EXPORT_SYMBOL_GPL(sched_trace_rq_avg_irq);

int sched_trace_rq_cpu(struct rq *rq)
{
    return rq ? cpu_of(rq) : -1;
}
EXPORT_SYMBOL_GPL(sched_trace_rq_cpu);

int sched_trace_rq_cpu_capacity(struct rq *rq)
{
    return rq ?
#ifdef CONFIG_SMP
              rq->cpu_capacity
#else
              SCHED_CAPACITY_SCALE
#endif
              : -1;
}
EXPORT_SYMBOL_GPL(sched_trace_rq_cpu_capacity);

const struct cpumask *sched_trace_rd_span(struct root_domain *rd)
{
#ifdef CONFIG_SMP
    return rd ? rd->span : NULL;
#else
    return NULL;
#endif
}
EXPORT_SYMBOL_GPL(sched_trace_rd_span);

int sched_trace_rq_nr_running(struct rq *rq)
{
    return rq ? rq->nr_running : -1;
}
EXPORT_SYMBOL_GPL(sched_trace_rq_nr_running);