1 /*
2  * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3  *
4  *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5  *
6  *  Interactivity improvements by Mike Galbraith
7  *  (C) 2007 Mike Galbraith <efault@gmx.de>
8  *
9  *  Various enhancements by Dmitry Adamushko.
10  *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11  *
12  *  Group scheduling enhancements by Srivatsa Vaddagiri
13  *  Copyright IBM Corporation, 2007
14  *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15  *
16  *  Scaled math optimizations by Thomas Gleixner
17  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18  *
19  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
21  */
22 
23 #include <linux/latencytop.h>
24 #include <linux/sched.h>
25 #include <linux/cpumask.h>
26 #include <linux/slab.h>
27 #include <linux/profile.h>
28 #include <linux/interrupt.h>
29 #include <linux/mempolicy.h>
30 #include <linux/migrate.h>
31 #include <linux/task_work.h>
32 
33 #include <trace/events/sched.h>
34 
35 #include "sched.h"
36 
37 /*
38  * Targeted preemption latency for CPU-bound tasks:
39  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
40  *
41  * NOTE: this latency value is not the same as the concept of
42  * 'timeslice length' - timeslices in CFS are of variable length
43  * and have no persistent notion like in traditional, time-slice
44  * based scheduling concepts.
45  *
46  * (to see the precise effective timeslice length of your workload,
47  *  run vmstat and monitor the context-switches (cs) field)
48  */
49 unsigned int sysctl_sched_latency = 6000000ULL;
50 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
51 
52 /*
53  * The initial- and re-scaling of tunables is configurable
54  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
55  *
56  * Options are:
57  * SCHED_TUNABLESCALING_NONE - unscaled, always *1
58  * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
59  * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
60  */
61 enum sched_tunable_scaling sysctl_sched_tunable_scaling
62 	= SCHED_TUNABLESCALING_LOG;
63 
64 /*
65  * Minimal preemption granularity for CPU-bound tasks:
66  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
67  */
68 unsigned int sysctl_sched_min_granularity = 750000ULL;
69 unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
70 
71 /*
72  * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
73  */
74 static unsigned int sched_nr_latency = 8;
75 
76 /*
77  * After fork, child runs first. If set to 0 (default) then
78  * parent will (try to) run first.
79  */
80 unsigned int sysctl_sched_child_runs_first __read_mostly;
81 
82 /*
83  * SCHED_OTHER wake-up granularity.
84  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
85  *
86  * This option delays the preemption effects of decoupled workloads
87  * and reduces their over-scheduling. Synchronous workloads will still
88  * have immediate wakeup/sleep latencies.
89  */
90 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
91 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
92 
93 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
94 
95 /*
96  * The exponential sliding window over which load is averaged for shares
97  * distribution.
98  * (default: 10msec)
99  */
100 unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
101 
102 #ifdef CONFIG_CFS_BANDWIDTH
103 /*
104  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
105  * each time a cfs_rq requests quota.
106  *
107  * Note: in the case that the slice exceeds the runtime remaining (either due
108  * to consumption or the quota being specified to be smaller than the slice)
109  * we will always only issue the remaining available time.
110  *
111  * default: 5 msec, units: microseconds
112  */
113 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
114 #endif
115 
116 /*
117  * Increase the granularity value when there are more CPUs,
118  * because with more CPUs the 'effective latency' as visible
119  * to users decreases. But the relationship is not linear,
120  * so pick a second-best guess by going with the log2 of the
121  * number of CPUs.
122  *
123  * This idea comes from the SD scheduler of Con Kolivas:
124  */
125 static int get_update_sysctl_factor(void)
126 {
127 	unsigned int cpus = min_t(int, num_online_cpus(), 8);
128 	unsigned int factor;
129 
130 	switch (sysctl_sched_tunable_scaling) {
131 	case SCHED_TUNABLESCALING_NONE:
132 		factor = 1;
133 		break;
134 	case SCHED_TUNABLESCALING_LINEAR:
135 		factor = cpus;
136 		break;
137 	case SCHED_TUNABLESCALING_LOG:
138 	default:
139 		factor = 1 + ilog2(cpus);
140 		break;
141 	}
142 
143 	return factor;
144 }
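
/*
 * Illustrative example (assuming the defaults above): with
 * SCHED_TUNABLESCALING_LOG and e.g. 8 online CPUs, cpus is clamped to 8
 * and factor = 1 + ilog2(8) = 4, so update_sysctl() below would give
 * roughly:
 *
 *   sysctl_sched_latency            = 4 * 6ms    = 24ms
 *   sysctl_sched_min_granularity    = 4 * 0.75ms = 3ms
 *   sysctl_sched_wakeup_granularity = 4 * 1ms    = 4ms
 *
 * With SCHED_TUNABLESCALING_LINEAR the factor would be 8 instead, and
 * with SCHED_TUNABLESCALING_NONE the normalized defaults are used as-is.
 */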
145 
146 static void update_sysctl(void)
147 {
148 	unsigned int factor = get_update_sysctl_factor();
149 
150 #define SET_SYSCTL(name) \
151 	(sysctl_##name = (factor) * normalized_sysctl_##name)
152 	SET_SYSCTL(sched_min_granularity);
153 	SET_SYSCTL(sched_latency);
154 	SET_SYSCTL(sched_wakeup_granularity);
155 #undef SET_SYSCTL
156 }
157 
158 void sched_init_granularity(void)
159 {
160 	update_sysctl();
161 }
162 
163 #if BITS_PER_LONG == 32
164 # define WMULT_CONST	(~0UL)
165 #else
166 # define WMULT_CONST	(1UL << 32)
167 #endif
168 
169 #define WMULT_SHIFT	32
170 
171 /*
172  * Shift right and round:
173  */
174 #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
175 
176 /*
177  * delta *= weight / lw
178  */
179 static unsigned long
180 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
181 		struct load_weight *lw)
182 {
183 	u64 tmp;
184 
185 	/*
186 	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
187 	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
188 	 * 2^SCHED_LOAD_RESOLUTION.
189 	 */
190 	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
191 		tmp = (u64)delta_exec * scale_load_down(weight);
192 	else
193 		tmp = (u64)delta_exec;
194 
195 	if (!lw->inv_weight) {
196 		unsigned long w = scale_load_down(lw->weight);
197 
198 		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
199 			lw->inv_weight = 1;
200 		else if (unlikely(!w))
201 			lw->inv_weight = WMULT_CONST;
202 		else
203 			lw->inv_weight = WMULT_CONST / w;
204 	}
205 
206 	/*
207 	 * Check whether we'd overflow the 64-bit multiplication:
208 	 */
209 	if (unlikely(tmp > WMULT_CONST))
210 		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
211 			WMULT_SHIFT/2);
212 	else
213 		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
214 
215 	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
216 }
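
/*
 * Rough worked example of the fixed-point math above (illustrative,
 * assuming the low load resolution where nice-0 weight is 1024): for an
 * entity of weight 1024 on a queue whose total load lw->weight is 3072,
 * inv_weight is initialised to 2^32 / 3072, and
 *
 *   calc_delta_mine(6000000, 1024, lw)
 *     ~= SRR(6000000 * 1024 * inv_weight, 32)
 *     ~= 6000000 * 1024 / 3072 = 2000000
 *
 * i.e. the entity is credited with one third of the elapsed nanoseconds,
 * with SRR() rounding to nearest rather than truncating.
 */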
217 
218 
219 const struct sched_class fair_sched_class;
220 
221 /**************************************************************
222  * CFS operations on generic schedulable entities:
223  */
224 
225 #ifdef CONFIG_FAIR_GROUP_SCHED
226 
227 /* cpu runqueue to which this cfs_rq is attached */
228 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
229 {
230 	return cfs_rq->rq;
231 }
232 
233 /* An entity is a task if it doesn't "own" a runqueue */
234 #define entity_is_task(se)	(!se->my_q)
235 
236 static inline struct task_struct *task_of(struct sched_entity *se)
237 {
238 #ifdef CONFIG_SCHED_DEBUG
239 	WARN_ON_ONCE(!entity_is_task(se));
240 #endif
241 	return container_of(se, struct task_struct, se);
242 }
243 
244 /* Walk up scheduling entities hierarchy */
245 #define for_each_sched_entity(se) \
246 		for (; se; se = se->parent)
247 
248 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
249 {
250 	return p->se.cfs_rq;
251 }
252 
253 /* runqueue on which this entity is (to be) queued */
254 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
255 {
256 	return se->cfs_rq;
257 }
258 
259 /* runqueue "owned" by this group */
260 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
261 {
262 	return grp->my_q;
263 }
264 
265 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
266 				       int force_update);
267 
268 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
269 {
270 	if (!cfs_rq->on_list) {
271 		/*
272 		 * Ensure we either appear before our parent (if already
273 		 * enqueued) or force our parent to appear after us when it is
274 		 * enqueued.  The fact that we always enqueue bottom-up
275 		 * reduces this to two cases.
276 		 */
277 		if (cfs_rq->tg->parent &&
278 		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
279 			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
280 				&rq_of(cfs_rq)->leaf_cfs_rq_list);
281 		} else {
282 			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
283 				&rq_of(cfs_rq)->leaf_cfs_rq_list);
284 		}
285 
286 		cfs_rq->on_list = 1;
287 		/* We should have no load, but we need to update last_decay. */
288 		update_cfs_rq_blocked_load(cfs_rq, 0);
289 	}
290 }
291 
292 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
293 {
294 	if (cfs_rq->on_list) {
295 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
296 		cfs_rq->on_list = 0;
297 	}
298 }
299 
300 /* Iterate through all leaf cfs_rq's on a runqueue */
301 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
302 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
303 
304 /* Do the two (enqueued) entities belong to the same group ? */
305 static inline int
306 is_same_group(struct sched_entity *se, struct sched_entity *pse)
307 {
308 	if (se->cfs_rq == pse->cfs_rq)
309 		return 1;
310 
311 	return 0;
312 }
313 
314 static inline struct sched_entity *parent_entity(struct sched_entity *se)
315 {
316 	return se->parent;
317 }
318 
319 /* return depth at which a sched entity is present in the hierarchy */
320 static inline int depth_se(struct sched_entity *se)
321 {
322 	int depth = 0;
323 
324 	for_each_sched_entity(se)
325 		depth++;
326 
327 	return depth;
328 }
329 
330 static void
331 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
332 {
333 	int se_depth, pse_depth;
334 
335 	/*
336 	 * A preemption test can be made between sibling entities that are in the
337 	 * same cfs_rq, i.e. that have a common parent. Walk up the hierarchy of
338 	 * both tasks until we find ancestors that are siblings under a common
339 	 * parent.
340 	 */
341 
342 	/* First walk up until both entities are at same depth */
343 	se_depth = depth_se(*se);
344 	pse_depth = depth_se(*pse);
345 
346 	while (se_depth > pse_depth) {
347 		se_depth--;
348 		*se = parent_entity(*se);
349 	}
350 
351 	while (pse_depth > se_depth) {
352 		pse_depth--;
353 		*pse = parent_entity(*pse);
354 	}
355 
356 	while (!is_same_group(*se, *pse)) {
357 		*se = parent_entity(*se);
358 		*pse = parent_entity(*pse);
359 	}
360 }
361 
362 #else	/* !CONFIG_FAIR_GROUP_SCHED */
363 
364 static inline struct task_struct *task_of(struct sched_entity *se)
365 {
366 	return container_of(se, struct task_struct, se);
367 }
368 
369 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
370 {
371 	return container_of(cfs_rq, struct rq, cfs);
372 }
373 
374 #define entity_is_task(se)	1
375 
376 #define for_each_sched_entity(se) \
377 		for (; se; se = NULL)
378 
379 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
380 {
381 	return &task_rq(p)->cfs;
382 }
383 
384 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
385 {
386 	struct task_struct *p = task_of(se);
387 	struct rq *rq = task_rq(p);
388 
389 	return &rq->cfs;
390 }
391 
392 /* runqueue "owned" by this group */
393 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
394 {
395 	return NULL;
396 }
397 
398 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
399 {
400 }
401 
402 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
403 {
404 }
405 
406 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
407 		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
408 
409 static inline int
410 is_same_group(struct sched_entity *se, struct sched_entity *pse)
411 {
412 	return 1;
413 }
414 
415 static inline struct sched_entity *parent_entity(struct sched_entity *se)
416 {
417 	return NULL;
418 }
419 
420 static inline void
421 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
422 {
423 }
424 
425 #endif	/* CONFIG_FAIR_GROUP_SCHED */
426 
427 static __always_inline
428 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
429 
430 /**************************************************************
431  * Scheduling class tree data structure manipulation methods:
432  */
433 
434 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
435 {
436 	s64 delta = (s64)(vruntime - max_vruntime);
437 	if (delta > 0)
438 		max_vruntime = vruntime;
439 
440 	return max_vruntime;
441 }
442 
443 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
444 {
445 	s64 delta = (s64)(vruntime - min_vruntime);
446 	if (delta < 0)
447 		min_vruntime = vruntime;
448 
449 	return min_vruntime;
450 }
451 
452 static inline int entity_before(struct sched_entity *a,
453 				struct sched_entity *b)
454 {
455 	return (s64)(a->vruntime - b->vruntime) < 0;
456 }
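
/*
 * Note (illustrative): ordering goes through a signed 64-bit difference
 * rather than a plain '<' so it stays correct across u64 wrap-around.
 * E.g. with a->vruntime = ULLONG_MAX - 5 and b->vruntime = 10 (b has
 * just wrapped), the difference is -16 as an s64, so a still sorts
 * before b even though its raw value is numerically larger.
 */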
457 
458 static void update_min_vruntime(struct cfs_rq *cfs_rq)
459 {
460 	u64 vruntime = cfs_rq->min_vruntime;
461 
462 	if (cfs_rq->curr)
463 		vruntime = cfs_rq->curr->vruntime;
464 
465 	if (cfs_rq->rb_leftmost) {
466 		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
467 						   struct sched_entity,
468 						   run_node);
469 
470 		if (!cfs_rq->curr)
471 			vruntime = se->vruntime;
472 		else
473 			vruntime = min_vruntime(vruntime, se->vruntime);
474 	}
475 
476 	/* ensure we never gain time by being placed backwards. */
477 	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
478 #ifndef CONFIG_64BIT
479 	smp_wmb();
480 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
481 #endif
482 }
483 
484 /*
485  * Enqueue an entity into the rb-tree:
486  */
487 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
488 {
489 	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
490 	struct rb_node *parent = NULL;
491 	struct sched_entity *entry;
492 	int leftmost = 1;
493 
494 	/*
495 	 * Find the right place in the rbtree:
496 	 */
497 	while (*link) {
498 		parent = *link;
499 		entry = rb_entry(parent, struct sched_entity, run_node);
500 		/*
501 		 * We don't care about collisions. Nodes with
502 		 * the same key stay together.
503 		 */
504 		if (entity_before(se, entry)) {
505 			link = &parent->rb_left;
506 		} else {
507 			link = &parent->rb_right;
508 			leftmost = 0;
509 		}
510 	}
511 
512 	/*
513 	 * Maintain a cache of leftmost tree entries (it is frequently
514 	 * used):
515 	 */
516 	if (leftmost)
517 		cfs_rq->rb_leftmost = &se->run_node;
518 
519 	rb_link_node(&se->run_node, parent, link);
520 	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
521 }
522 
523 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
524 {
525 	if (cfs_rq->rb_leftmost == &se->run_node) {
526 		struct rb_node *next_node;
527 
528 		next_node = rb_next(&se->run_node);
529 		cfs_rq->rb_leftmost = next_node;
530 	}
531 
532 	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
533 }
534 
535 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
536 {
537 	struct rb_node *left = cfs_rq->rb_leftmost;
538 
539 	if (!left)
540 		return NULL;
541 
542 	return rb_entry(left, struct sched_entity, run_node);
543 }
544 
545 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
546 {
547 	struct rb_node *next = rb_next(&se->run_node);
548 
549 	if (!next)
550 		return NULL;
551 
552 	return rb_entry(next, struct sched_entity, run_node);
553 }
554 
555 #ifdef CONFIG_SCHED_DEBUG
556 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
557 {
558 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
559 
560 	if (!last)
561 		return NULL;
562 
563 	return rb_entry(last, struct sched_entity, run_node);
564 }
565 
566 /**************************************************************
567  * Scheduling class statistics methods:
568  */
569 
570 int sched_proc_update_handler(struct ctl_table *table, int write,
571 		void __user *buffer, size_t *lenp,
572 		loff_t *ppos)
573 {
574 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
575 	int factor = get_update_sysctl_factor();
576 
577 	if (ret || !write)
578 		return ret;
579 
580 	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
581 					sysctl_sched_min_granularity);
582 
583 #define WRT_SYSCTL(name) \
584 	(normalized_sysctl_##name = sysctl_##name / (factor))
585 	WRT_SYSCTL(sched_min_granularity);
586 	WRT_SYSCTL(sched_latency);
587 	WRT_SYSCTL(sched_wakeup_granularity);
588 #undef WRT_SYSCTL
589 
590 	return 0;
591 }
592 #endif
593 
594 /*
595  * delta /= w
596  */
597 static inline unsigned long
598 calc_delta_fair(unsigned long delta, struct sched_entity *se)
599 {
600 	if (unlikely(se->load.weight != NICE_0_LOAD))
601 		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
602 
603 	return delta;
604 }
605 
606 /*
607  * The idea is to set a period in which each task runs once.
608  *
609  * When there are too many tasks (sched_nr_latency) we have to stretch
610  * this period because otherwise the slices get too small.
611  *
612  * p = (nr <= nl) ? l : l*nr/nl
613  */
614 static u64 __sched_period(unsigned long nr_running)
615 {
616 	u64 period = sysctl_sched_latency;
617 	unsigned long nr_latency = sched_nr_latency;
618 
619 	if (unlikely(nr_running > nr_latency)) {
620 		period = sysctl_sched_min_granularity;
621 		period *= nr_running;
622 	}
623 
624 	return period;
625 }
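
/*
 * Worked example (assuming the unscaled defaults: 6ms latency, 0.75ms
 * minimum granularity, sched_nr_latency = 8): with 4 runnable tasks the
 * period stays at 6ms; with 16 runnable tasks it stretches to
 * 16 * 0.75ms = 12ms so that no slice falls below the minimum
 * granularity.
 */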
626 
627 /*
628  * We calculate the wall-time slice from the period by taking a part
629  * proportional to the weight.
630  *
631  * s = p*P[w/rw]
632  */
633 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
634 {
635 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
636 
637 	for_each_sched_entity(se) {
638 		struct load_weight *load;
639 		struct load_weight lw;
640 
641 		cfs_rq = cfs_rq_of(se);
642 		load = &cfs_rq->load;
643 
644 		if (unlikely(!se->on_rq)) {
645 			lw = cfs_rq->load;
646 
647 			update_load_add(&lw, se->load.weight);
648 			load = &lw;
649 		}
650 		slice = calc_delta_mine(slice, se->load.weight, load);
651 	}
652 	return slice;
653 }
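
/*
 * Worked example (illustrative, unscaled defaults): three runnable
 * nice-0 tasks share a 6ms period, each with weight 1024 out of a total
 * of 3072, so each gets a wall-time slice of 6ms * 1024/3072 = 2ms.
 * A heavier (lower-nice) entity on the same queue would receive a
 * proportionally larger share of the same period.
 */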
654 
655 /*
656  * We calculate the vruntime slice of a to-be-inserted task.
657  *
658  * vs = s/w
659  */
660 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
661 {
662 	return calc_delta_fair(sched_slice(cfs_rq, se), se);
663 }
664 
665 /*
666  * Update the current task's runtime statistics. Skip current tasks that
667  * are not in our scheduling class.
668  */
669 static inline void
670 __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
671 	      unsigned long delta_exec)
672 {
673 	unsigned long delta_exec_weighted;
674 
675 	schedstat_set(curr->statistics.exec_max,
676 		      max((u64)delta_exec, curr->statistics.exec_max));
677 
678 	curr->sum_exec_runtime += delta_exec;
679 	schedstat_add(cfs_rq, exec_clock, delta_exec);
680 	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
681 
682 	curr->vruntime += delta_exec_weighted;
683 	update_min_vruntime(cfs_rq);
684 }
685 
686 static void update_curr(struct cfs_rq *cfs_rq)
687 {
688 	struct sched_entity *curr = cfs_rq->curr;
689 	u64 now = rq_of(cfs_rq)->clock_task;
690 	unsigned long delta_exec;
691 
692 	if (unlikely(!curr))
693 		return;
694 
695 	/*
696 	 * Get the amount of time the current task was running
697 	 * since the last time we changed load (this cannot
698 	 * overflow on 32 bits):
699 	 */
700 	delta_exec = (unsigned long)(now - curr->exec_start);
701 	if (!delta_exec)
702 		return;
703 
704 	__update_curr(cfs_rq, curr, delta_exec);
705 	curr->exec_start = now;
706 
707 	if (entity_is_task(curr)) {
708 		struct task_struct *curtask = task_of(curr);
709 
710 		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
711 		cpuacct_charge(curtask, delta_exec);
712 		account_group_exec_runtime(curtask, delta_exec);
713 	}
714 
715 	account_cfs_rq_runtime(cfs_rq, delta_exec);
716 }
717 
718 static inline void
719 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
720 {
721 	schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
722 }
723 
724 /*
725  * Task is being enqueued - update stats:
726  */
727 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
728 {
729 	/*
730 	 * Are we enqueueing a waiting task? (for current tasks
731 	 * a dequeue/enqueue event is a NOP)
732 	 */
733 	if (se != cfs_rq->curr)
734 		update_stats_wait_start(cfs_rq, se);
735 }
736 
737 static void
738 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
739 {
740 	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
741 			rq_of(cfs_rq)->clock - se->statistics.wait_start));
742 	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
743 	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
744 			rq_of(cfs_rq)->clock - se->statistics.wait_start);
745 #ifdef CONFIG_SCHEDSTATS
746 	if (entity_is_task(se)) {
747 		trace_sched_stat_wait(task_of(se),
748 			rq_of(cfs_rq)->clock - se->statistics.wait_start);
749 	}
750 #endif
751 	schedstat_set(se->statistics.wait_start, 0);
752 }
753 
754 static inline void
755 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
756 {
757 	/*
758 	 * Mark the end of the wait period if dequeueing a
759 	 * waiting task:
760 	 */
761 	if (se != cfs_rq->curr)
762 		update_stats_wait_end(cfs_rq, se);
763 }
764 
765 /*
766  * We are picking a new current task - update its stats:
767  */
768 static inline void
769 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
770 {
771 	/*
772 	 * We are starting a new run period:
773 	 */
774 	se->exec_start = rq_of(cfs_rq)->clock_task;
775 }
776 
777 /**************************************************
778  * Scheduling class queueing methods:
779  */
780 
781 #ifdef CONFIG_NUMA_BALANCING
782 /*
783  * numa task sample period in ms
784  */
785 unsigned int sysctl_numa_balancing_scan_period_min = 100;
786 unsigned int sysctl_numa_balancing_scan_period_max = 100*50;
787 unsigned int sysctl_numa_balancing_scan_period_reset = 100*600;
788 
789 /* Portion of address space to scan in MB */
790 unsigned int sysctl_numa_balancing_scan_size = 256;
791 
792 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
793 unsigned int sysctl_numa_balancing_scan_delay = 1000;
794 
795 static void task_numa_placement(struct task_struct *p)
796 {
797 	int seq;
798 
799 	if (!p->mm)	/* for example, ksmd faulting in a user's mm */
800 		return;
801 	seq = ACCESS_ONCE(p->mm->numa_scan_seq);
802 	if (p->numa_scan_seq == seq)
803 		return;
804 	p->numa_scan_seq = seq;
805 
806 	/* FIXME: Scheduling placement policy hints go here */
807 }
808 
809 /*
810  * Got a PROT_NONE fault for a page on @node.
811  */
812 void task_numa_fault(int node, int pages, bool migrated)
813 {
814 	struct task_struct *p = current;
815 
816 	if (!sched_feat_numa(NUMA))
817 		return;
818 
819 	/* FIXME: Allocate task-specific structure for placement policy here */
820 
821 	/*
822 	 * If pages are properly placed (did not migrate) then scan slower.
823 	 * This is reset periodically in case of phase changes
824 	 */
825 	if (!migrated)
826 		p->numa_scan_period = min(sysctl_numa_balancing_scan_period_max,
827 			p->numa_scan_period + jiffies_to_msecs(10));
828 
829 	task_numa_placement(p);
830 }
831 
832 static void reset_ptenuma_scan(struct task_struct *p)
833 {
834 	ACCESS_ONCE(p->mm->numa_scan_seq)++;
835 	p->mm->numa_scan_offset = 0;
836 }
837 
838 /*
839  * The expensive part of numa migration is done from task_work context.
840  * Triggered from task_tick_numa().
841  */
842 void task_numa_work(struct callback_head *work)
843 {
844 	unsigned long migrate, next_scan, now = jiffies;
845 	struct task_struct *p = current;
846 	struct mm_struct *mm = p->mm;
847 	struct vm_area_struct *vma;
848 	unsigned long start, end;
849 	long pages;
850 
851 	WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
852 
853 	work->next = work; /* protect against double add */
854 	/*
855 	 * Who cares about NUMA placement when they're dying.
856 	 *
857 	 * NOTE: make sure not to dereference p->mm before this check,
858 	 * exit_task_work() happens _after_ exit_mm() so we could be called
859 	 * without p->mm even though we still had it when we enqueued this
860 	 * work.
861 	 */
862 	if (p->flags & PF_EXITING)
863 		return;
864 
865 	/*
866 	 * We do not care about task placement until a task runs on a node
867 	 * other than the first one used by the address space. This is
868 	 * largely because migrations are driven by what CPU the task
869 	 * is running on. If it's never scheduled on another node, it'll
870 	 * not migrate so why bother trapping the fault.
871 	 */
872 	if (mm->first_nid == NUMA_PTE_SCAN_INIT)
873 		mm->first_nid = numa_node_id();
874 	if (mm->first_nid != NUMA_PTE_SCAN_ACTIVE) {
875 		/* Are we running on a new node yet? */
876 		if (numa_node_id() == mm->first_nid &&
877 		    !sched_feat_numa(NUMA_FORCE))
878 			return;
879 
880 		mm->first_nid = NUMA_PTE_SCAN_ACTIVE;
881 	}
882 
883 	/*
884 	 * Reset the scan period if enough time has gone by. Objective is that
885 	 * scanning will be reduced if pages are properly placed. As tasks
886 	 * can enter different phases this needs to be re-examined. Lacking
887 	 * proper tracking of reference behaviour, this blunt hammer is used.
888 	 */
889 	migrate = mm->numa_next_reset;
890 	if (time_after(now, migrate)) {
891 		p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
892 		next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
893 		xchg(&mm->numa_next_reset, next_scan);
894 	}
895 
896 	/*
897 	 * Enforce maximal scan/migration frequency..
898 	 */
899 	migrate = mm->numa_next_scan;
900 	if (time_before(now, migrate))
901 		return;
902 
903 	if (p->numa_scan_period == 0)
904 		p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
905 
906 	next_scan = now + msecs_to_jiffies(p->numa_scan_period);
907 	if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
908 		return;
909 
910 	/*
911 	 * Do not set pte_numa if the current running node is rate-limited.
912 	 * This loses statistics on the fault but if we are unwilling to
913 	 * migrate to this node, it is less likely we can do useful work
914 	 */
915 	if (migrate_ratelimited(numa_node_id()))
916 		return;
917 
918 	start = mm->numa_scan_offset;
919 	pages = sysctl_numa_balancing_scan_size;
920 	pages <<= 20 - PAGE_SHIFT; /* MB in pages */
921 	if (!pages)
922 		return;
923 
924 	down_read(&mm->mmap_sem);
925 	vma = find_vma(mm, start);
926 	if (!vma) {
927 		reset_ptenuma_scan(p);
928 		start = 0;
929 		vma = mm->mmap;
930 	}
931 	for (; vma; vma = vma->vm_next) {
932 		if (!vma_migratable(vma))
933 			continue;
934 
935 		/* Skip small VMAs. They are not likely to be of relevance */
936 		if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
937 			continue;
938 
939 		do {
940 			start = max(start, vma->vm_start);
941 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
942 			end = min(end, vma->vm_end);
943 			pages -= change_prot_numa(vma, start, end);
944 
945 			start = end;
946 			if (pages <= 0)
947 				goto out;
948 		} while (end != vma->vm_end);
949 	}
950 
951 out:
952 	/*
953 	 * It is possible to reach the end of the VMA list but the last few VMAs are
954 	 * not guaranteed to be vma_migratable. If they are not, we would find the
955 	 * !migratable VMA on the next scan but not reset the scanner to the start
956 	 * so check it now.
957 	 */
958 	if (vma)
959 		mm->numa_scan_offset = start;
960 	else
961 		reset_ptenuma_scan(p);
962 	up_read(&mm->mmap_sem);
963 }
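
/*
 * Rough sizing example (illustrative, assuming 4KB base pages, i.e.
 * PAGE_SHIFT == 12): the default scan size of 256MB becomes a budget of
 * 256 << (20 - 12) = 65536 pages per pass, walked in HPAGE_SIZE-aligned
 * chunks, with VMAs smaller than HPAGE_SIZE skipped entirely.
 */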
964 
965 /*
966  * Drive the periodic memory faults..
967  */
968 void task_tick_numa(struct rq *rq, struct task_struct *curr)
969 {
970 	struct callback_head *work = &curr->numa_work;
971 	u64 period, now;
972 
973 	/*
974 	 * We don't care about NUMA placement if we don't have memory.
975 	 */
976 	if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
977 		return;
978 
979 	/*
980 	 * Using runtime rather than walltime has the dual advantage that
981 	 * we (mostly) drive the selection from busy threads and that the
982 	 * task needs to have done some actual work before we bother with
983 	 * NUMA placement.
984 	 */
985 	now = curr->se.sum_exec_runtime;
986 	period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
987 
988 	if (now - curr->node_stamp > period) {
989 		if (!curr->node_stamp)
990 			curr->numa_scan_period = sysctl_numa_balancing_scan_period_min;
991 		curr->node_stamp = now;
992 
993 		if (!time_before(jiffies, curr->mm->numa_next_scan)) {
994 			init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
995 			task_work_add(curr, work, true);
996 		}
997 	}
998 }
999 #else
1000 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
1001 {
1002 }
1003 #endif /* CONFIG_NUMA_BALANCING */
1004 
1005 static void
1006 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1007 {
1008 	update_load_add(&cfs_rq->load, se->load.weight);
1009 	if (!parent_entity(se))
1010 		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
1011 #ifdef CONFIG_SMP
1012 	if (entity_is_task(se))
1013 		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
1014 #endif
1015 	cfs_rq->nr_running++;
1016 }
1017 
1018 static void
1019 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1020 {
1021 	update_load_sub(&cfs_rq->load, se->load.weight);
1022 	if (!parent_entity(se))
1023 		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
1024 	if (entity_is_task(se))
1025 		list_del_init(&se->group_node);
1026 	cfs_rq->nr_running--;
1027 }
1028 
1029 #ifdef CONFIG_FAIR_GROUP_SCHED
1030 # ifdef CONFIG_SMP
1031 static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
1032 {
1033 	long tg_weight;
1034 
1035 	/*
1036 	 * Use this CPU's actual weight instead of the last load_contribution
1037 	 * to gain a more accurate current total weight. See
1038 	 * update_cfs_rq_load_contribution().
1039 	 */
1040 	tg_weight = atomic64_read(&tg->load_avg);
1041 	tg_weight -= cfs_rq->tg_load_contrib;
1042 	tg_weight += cfs_rq->load.weight;
1043 
1044 	return tg_weight;
1045 }
1046 
1047 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
1048 {
1049 	long tg_weight, load, shares;
1050 
1051 	tg_weight = calc_tg_weight(tg, cfs_rq);
1052 	load = cfs_rq->load.weight;
1053 
1054 	shares = (tg->shares * load);
1055 	if (tg_weight)
1056 		shares /= tg_weight;
1057 
1058 	if (shares < MIN_SHARES)
1059 		shares = MIN_SHARES;
1060 	if (shares > tg->shares)
1061 		shares = tg->shares;
1062 
1063 	return shares;
1064 }
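
/*
 * Worked example (illustrative, ignoring the blocked/averaged terms):
 * a group with tg->shares = 1024 runs two nice-0 tasks on this CPU
 * (cfs_rq load 2048) and one on another CPU, so tg_weight ~= 3072 and
 *
 *   shares = 1024 * 2048 / 3072 ~= 682
 *
 * clamped to [MIN_SHARES, tg->shares]; this CPU's group entity is then
 * reweighted to roughly two thirds of the group's total shares.
 */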
1065 # else /* CONFIG_SMP */
1066 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
1067 {
1068 	return tg->shares;
1069 }
1070 # endif /* CONFIG_SMP */
1071 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
1072 			    unsigned long weight)
1073 {
1074 	if (se->on_rq) {
1075 		/* commit outstanding execution time */
1076 		if (cfs_rq->curr == se)
1077 			update_curr(cfs_rq);
1078 		account_entity_dequeue(cfs_rq, se);
1079 	}
1080 
1081 	update_load_set(&se->load, weight);
1082 
1083 	if (se->on_rq)
1084 		account_entity_enqueue(cfs_rq, se);
1085 }
1086 
1087 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
1088 
1089 static void update_cfs_shares(struct cfs_rq *cfs_rq)
1090 {
1091 	struct task_group *tg;
1092 	struct sched_entity *se;
1093 	long shares;
1094 
1095 	tg = cfs_rq->tg;
1096 	se = tg->se[cpu_of(rq_of(cfs_rq))];
1097 	if (!se || throttled_hierarchy(cfs_rq))
1098 		return;
1099 #ifndef CONFIG_SMP
1100 	if (likely(se->load.weight == tg->shares))
1101 		return;
1102 #endif
1103 	shares = calc_cfs_shares(cfs_rq, tg);
1104 
1105 	reweight_entity(cfs_rq_of(se), se, shares);
1106 }
1107 #else /* CONFIG_FAIR_GROUP_SCHED */
1108 static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
1109 {
1110 }
1111 #endif /* CONFIG_FAIR_GROUP_SCHED */
1112 
1113 /* Only depends on SMP, FAIR_GROUP_SCHED may be removed when useful in lb */
1114 #if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
1115 /*
1116  * We choose a half-life close to 1 scheduling period.
1117  * Note: The tables below are dependent on this value.
1118  */
1119 #define LOAD_AVG_PERIOD 32
1120 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
1121 #define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
1122 
1123 /* Precomputed fixed inverse multiplies for multiplication by y^n */
1124 static const u32 runnable_avg_yN_inv[] = {
1125 	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
1126 	0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
1127 	0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
1128 	0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
1129 	0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
1130 	0x85aac367, 0x82cd8698,
1131 };
1132 
1133 /*
1134  * Precomputed \Sum y^k { 1<=k<=n }.  These are floor(true_value) to prevent
1135  * over-estimates when re-combining.
1136  */
1137 static const u32 runnable_avg_yN_sum[] = {
1138 	    0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
1139 	 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
1140 	17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
1141 };
1142 
1143 /*
1144  * Approximate:
1145  *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
1146  */
1147 static __always_inline u64 decay_load(u64 val, u64 n)
1148 {
1149 	unsigned int local_n;
1150 
1151 	if (!n)
1152 		return val;
1153 	else if (unlikely(n > LOAD_AVG_PERIOD * 63))
1154 		return 0;
1155 
1156 	/* after bounds checking we can collapse to 32-bit */
1157 	local_n = n;
1158 
1159 	/*
1160 	 * As y^PERIOD = 1/2, we can combine
1161 	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
1162 	 * With a look-up table which covers y^n (n<PERIOD)
1163 	 *
1164 	 * To achieve constant time decay_load.
1165 	 */
1166 	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
1167 		val >>= local_n / LOAD_AVG_PERIOD;
1168 		local_n %= LOAD_AVG_PERIOD;
1169 	}
1170 
1171 	val *= runnable_avg_yN_inv[local_n];
1172 	/* We don't use SRR here since we always want to round down. */
1173 	return val >> 32;
1174 }
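
/*
 * Worked example (illustrative): decay_load(val, 36) first applies the
 * whole-period shift, val >>= 36 / 32 = 1, leaving n % 32 = 4, and then
 * multiplies by runnable_avg_yN_inv[4] (~0.917 in 32.32 fixed point),
 * for a net factor of about 0.5 * 0.917 ~= 0.46, i.e. y^36 with
 * y^32 = 0.5.
 */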
1175 
1176 /*
1177  * For updates fully spanning n periods, the contribution to runnable
1178  * average will be: \Sum 1024*y^n
1179  *
1180  * We can compute this reasonably efficiently by combining:
1181  *   y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for  n <PERIOD}
1182  */
1183 static u32 __compute_runnable_contrib(u64 n)
1184 {
1185 	u32 contrib = 0;
1186 
1187 	if (likely(n <= LOAD_AVG_PERIOD))
1188 		return runnable_avg_yN_sum[n];
1189 	else if (unlikely(n >= LOAD_AVG_MAX_N))
1190 		return LOAD_AVG_MAX;
1191 
1192 	/* Compute \Sum y^n combining precomputed values for y^i, \Sum y^j */
1193 	do {
1194 		contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
1195 		contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
1196 
1197 		n -= LOAD_AVG_PERIOD;
1198 	} while (n > LOAD_AVG_PERIOD);
1199 
1200 	contrib = decay_load(contrib, n);
1201 	return contrib + runnable_avg_yN_sum[n];
1202 }
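
/*
 * Worked example (illustrative): for n = 40 the loop runs once, giving
 * contrib = runnable_avg_yN_sum[32] = 23371 with n reduced to 8; that is
 * then decayed by y^8 (~0.841) to roughly 19650 and topped up with
 * runnable_avg_yN_sum[8] = 7437, for a total of about 27000 out of a
 * possible LOAD_AVG_MAX of 47742.
 */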
1203 
1204 /*
1205  * We can represent the historical contribution to runnable average as the
1206  * coefficients of a geometric series.  To do this we sub-divide our runnable
1207  * history into segments of approximately 1ms (1024us); label the segment that
1208  * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
1209  *
1210  * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
1211  *      p0            p1           p2
1212  *     (now)       (~1ms ago)  (~2ms ago)
1213  *
1214  * Let u_i denote the fraction of p_i that the entity was runnable.
1215  *
1216  * We then designate the fractions u_i as our coefficients, yielding the
1217  * following representation of historical load:
1218  *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
1219  *
1220  * We choose y based on the width of a reasonable scheduling period, fixing:
1221  *   y^32 = 0.5
1222  *
1223  * This means that the contribution to load ~32ms ago (u_32) will be weighted
1224  * approximately half as much as the contribution to load within the last ms
1225  * (u_0).
1226  *
1227  * When a period "rolls over" and we have new u_0`, multiplying the previous
1228  * sum again by y is sufficient to update:
1229  *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
1230  *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
1231  */
1232 static __always_inline int __update_entity_runnable_avg(u64 now,
1233 							struct sched_avg *sa,
1234 							int runnable)
1235 {
1236 	u64 delta, periods;
1237 	u32 runnable_contrib;
1238 	int delta_w, decayed = 0;
1239 
1240 	delta = now - sa->last_runnable_update;
1241 	/*
1242 	 * This should only happen when time goes backwards, which it
1243 	 * unfortunately does during sched clock init when we swap over to TSC.
1244 	 */
1245 	if ((s64)delta < 0) {
1246 		sa->last_runnable_update = now;
1247 		return 0;
1248 	}
1249 
1250 	/*
1251 	 * Use 1024ns as the unit of measurement since it's a reasonable
1252 	 * approximation of 1us and fast to compute.
1253 	 */
1254 	delta >>= 10;
1255 	if (!delta)
1256 		return 0;
1257 	sa->last_runnable_update = now;
1258 
1259 	/* delta_w is the amount already accumulated against our next period */
1260 	delta_w = sa->runnable_avg_period % 1024;
1261 	if (delta + delta_w >= 1024) {
1262 		/* period roll-over */
1263 		decayed = 1;
1264 
1265 		/*
1266 		 * Now that we know we're crossing a period boundary, figure
1267 		 * out how much from delta we need to complete the current
1268 		 * period and accrue it.
1269 		 */
1270 		delta_w = 1024 - delta_w;
1271 		if (runnable)
1272 			sa->runnable_avg_sum += delta_w;
1273 		sa->runnable_avg_period += delta_w;
1274 
1275 		delta -= delta_w;
1276 
1277 		/* Figure out how many additional periods this update spans */
1278 		periods = delta / 1024;
1279 		delta %= 1024;
1280 
1281 		sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
1282 						  periods + 1);
1283 		sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
1284 						     periods + 1);
1285 
1286 		/* Efficiently calculate \sum (1..n_period) 1024*y^i */
1287 		runnable_contrib = __compute_runnable_contrib(periods);
1288 		if (runnable)
1289 			sa->runnable_avg_sum += runnable_contrib;
1290 		sa->runnable_avg_period += runnable_contrib;
1291 	}
1292 
1293 	/* Remainder of delta accrued against u_0` */
1294 	if (runnable)
1295 		sa->runnable_avg_sum += delta;
1296 	sa->runnable_avg_period += delta;
1297 
1298 	return decayed;
1299 }
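
/*
 * Worked example (illustrative): suppose 200 out of the current 1024us
 * period had already been accumulated (delta_w = 200) and a runnable
 * entity is updated after delta = 3000 (~3ms in 1024ns units). The
 * first 824 units complete the current period, the remaining 2176 span
 * periods = 2 full periods with 128 left over, so the old sums are
 * decayed by y^3, __compute_runnable_contrib(2) = 1982 is added for the
 * full periods, and the final 128 accrue against the new current period.
 */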
1300 
1301 /* Synchronize an entity's decay with its parenting cfs_rq.*/
1302 static inline u64 __synchronize_entity_decay(struct sched_entity *se)
1303 {
1304 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
1305 	u64 decays = atomic64_read(&cfs_rq->decay_counter);
1306 
1307 	decays -= se->avg.decay_count;
1308 	if (!decays)
1309 		return 0;
1310 
1311 	se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
1312 	se->avg.decay_count = 0;
1313 
1314 	return decays;
1315 }
1316 
1317 #ifdef CONFIG_FAIR_GROUP_SCHED
1318 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1319 						 int force_update)
1320 {
1321 	struct task_group *tg = cfs_rq->tg;
1322 	s64 tg_contrib;
1323 
1324 	tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
1325 	tg_contrib -= cfs_rq->tg_load_contrib;
1326 
1327 	if (force_update || abs64(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
1328 		atomic64_add(tg_contrib, &tg->load_avg);
1329 		cfs_rq->tg_load_contrib += tg_contrib;
1330 	}
1331 }
1332 
1333 /*
1334  * Aggregate cfs_rq runnable averages into an equivalent task_group
1335  * representation for computing load contributions.
1336  */
1337 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1338 						  struct cfs_rq *cfs_rq)
1339 {
1340 	struct task_group *tg = cfs_rq->tg;
1341 	long contrib;
1342 
1343 	/* The fraction of a cpu used by this cfs_rq */
1344 	contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
1345 			  sa->runnable_avg_period + 1);
1346 	contrib -= cfs_rq->tg_runnable_contrib;
1347 
1348 	if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
1349 		atomic_add(contrib, &tg->runnable_avg);
1350 		cfs_rq->tg_runnable_contrib += contrib;
1351 	}
1352 }
1353 
1354 static inline void __update_group_entity_contrib(struct sched_entity *se)
1355 {
1356 	struct cfs_rq *cfs_rq = group_cfs_rq(se);
1357 	struct task_group *tg = cfs_rq->tg;
1358 	int runnable_avg;
1359 
1360 	u64 contrib;
1361 
1362 	contrib = cfs_rq->tg_load_contrib * tg->shares;
1363 	se->avg.load_avg_contrib = div64_u64(contrib,
1364 					     atomic64_read(&tg->load_avg) + 1);
1365 
1366 	/*
1367 	 * For group entities we need to compute a correction term in the case
1368 	 * that they are consuming <1 cpu so that we would contribute the same
1369 	 * load as a task of equal weight.
1370 	 *
1371 	 * Explicitly co-ordinating this measurement would be expensive, but
1372 	 * fortunately the sum of each cpus contribution forms a usable
1373 	 * lower-bound on the true value.
1374 	 *
1375 	 * Consider the aggregate of 2 contributions.  Either they are disjoint
1376 	 * (and the sum represents the true value) or they overlap and we are
1377 	 * understating by the aggregate of their overlap.
1378 	 *
1379 	 * Extending this to N cpus, for a given overlap, the maximum amount we
1380 	 * can understate is then n_i(n_i+1)/2 * w_i where n_i is the number of
1381 	 * cpus that overlap for this interval and w_i is the interval width.
1382 	 *
1383 	 * On a small machine, the first term is well-bounded, which bounds the
1384 	 * total error since w_i is a subset of the period.  Whereas on a
1385 	 * larger machine, while this first term can be larger, if w_i is of
1386 	 * consequential size we are guaranteed to see n_i*w_i quickly converge to
1387 	 * our upper bound of 1-cpu.
1388 	 */
1389 	runnable_avg = atomic_read(&tg->runnable_avg);
1390 	if (runnable_avg < NICE_0_LOAD) {
1391 		se->avg.load_avg_contrib *= runnable_avg;
1392 		se->avg.load_avg_contrib >>= NICE_0_SHIFT;
1393 	}
1394 }
1395 #else
1396 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1397 						 int force_update) {}
1398 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1399 						  struct cfs_rq *cfs_rq) {}
1400 static inline void __update_group_entity_contrib(struct sched_entity *se) {}
1401 #endif
1402 
1403 static inline void __update_task_entity_contrib(struct sched_entity *se)
1404 {
1405 	u32 contrib;
1406 
1407 	/* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
1408 	contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
1409 	contrib /= (se->avg.runnable_avg_period + 1);
1410 	se->avg.load_avg_contrib = scale_load(contrib);
1411 }
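
/*
 * Worked example (illustrative): a nice-0 task (weight ~1024 after
 * scale_load_down()) that has been runnable for about three quarters of
 * its averaged window has runnable_avg_sum ~= 0.75 * runnable_avg_period
 * and therefore contributes load_avg_contrib ~= 768.
 */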
1412 
1413 /* Compute the current contribution to load_avg by se, return any delta */
1414 static long __update_entity_load_avg_contrib(struct sched_entity *se)
1415 {
1416 	long old_contrib = se->avg.load_avg_contrib;
1417 
1418 	if (entity_is_task(se)) {
1419 		__update_task_entity_contrib(se);
1420 	} else {
1421 		__update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
1422 		__update_group_entity_contrib(se);
1423 	}
1424 
1425 	return se->avg.load_avg_contrib - old_contrib;
1426 }
1427 
1428 static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
1429 						 long load_contrib)
1430 {
1431 	if (likely(load_contrib < cfs_rq->blocked_load_avg))
1432 		cfs_rq->blocked_load_avg -= load_contrib;
1433 	else
1434 		cfs_rq->blocked_load_avg = 0;
1435 }
1436 
1437 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
1438 
1439 /* Update a sched_entity's runnable average */
1440 static inline void update_entity_load_avg(struct sched_entity *se,
1441 					  int update_cfs_rq)
1442 {
1443 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
1444 	long contrib_delta;
1445 	u64 now;
1446 
1447 	/*
1448 	 * For a group entity we need to use their owned cfs_rq_clock_task() in
1449 	 * case they are the parent of a throttled hierarchy.
1450 	 */
1451 	if (entity_is_task(se))
1452 		now = cfs_rq_clock_task(cfs_rq);
1453 	else
1454 		now = cfs_rq_clock_task(group_cfs_rq(se));
1455 
1456 	if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
1457 		return;
1458 
1459 	contrib_delta = __update_entity_load_avg_contrib(se);
1460 
1461 	if (!update_cfs_rq)
1462 		return;
1463 
1464 	if (se->on_rq)
1465 		cfs_rq->runnable_load_avg += contrib_delta;
1466 	else
1467 		subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
1468 }
1469 
1470 /*
1471  * Decay the load contributed by all blocked children and account this so that
1472  * their contribution may be appropriately discounted when they wake up.
1473  */
1474 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
1475 {
1476 	u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
1477 	u64 decays;
1478 
1479 	decays = now - cfs_rq->last_decay;
1480 	if (!decays && !force_update)
1481 		return;
1482 
1483 	if (atomic64_read(&cfs_rq->removed_load)) {
1484 		u64 removed_load = atomic64_xchg(&cfs_rq->removed_load, 0);
1485 		subtract_blocked_load_contrib(cfs_rq, removed_load);
1486 	}
1487 
1488 	if (decays) {
1489 		cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
1490 						      decays);
1491 		atomic64_add(decays, &cfs_rq->decay_counter);
1492 		cfs_rq->last_decay = now;
1493 	}
1494 
1495 	__update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
1496 }
1497 
1498 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
1499 {
1500 	__update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable);
1501 	__update_tg_runnable_avg(&rq->avg, &rq->cfs);
1502 }
1503 
1504 /* Add the load generated by se into cfs_rq's child load-average */
1505 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
1506 						  struct sched_entity *se,
1507 						  int wakeup)
1508 {
1509 	/*
1510 	 * We track migrations using entity decay_count <= 0, on a wake-up
1511 	 * migration we use a negative decay count to track the remote decays
1512 	 * accumulated while sleeping.
1513 	 */
1514 	if (unlikely(se->avg.decay_count <= 0)) {
1515 		se->avg.last_runnable_update = rq_of(cfs_rq)->clock_task;
1516 		if (se->avg.decay_count) {
1517 			/*
1518 			 * In a wake-up migration we have to approximate the
1519 			 * time sleeping.  This is because we can't synchronize
1520 			 * clock_task between the two cpus, and it is not
1521 			 * guaranteed to be read-safe.  Instead, we can
1522 			 * approximate this using our carried decays, which are
1523 			 * explicitly atomically readable.
1524 			 */
1525 			se->avg.last_runnable_update -= (-se->avg.decay_count)
1526 							<< 20;
1527 			update_entity_load_avg(se, 0);
1528 			/* Indicate that we're now synchronized and on-rq */
1529 			se->avg.decay_count = 0;
1530 		}
1531 		wakeup = 0;
1532 	} else {
1533 		__synchronize_entity_decay(se);
1534 	}
1535 
1536 	/* migrated tasks did not contribute to our blocked load */
1537 	if (wakeup) {
1538 		subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
1539 		update_entity_load_avg(se, 0);
1540 	}
1541 
1542 	cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
1543 	/* we force update consideration on load-balancer moves */
1544 	update_cfs_rq_blocked_load(cfs_rq, !wakeup);
1545 }
1546 
1547 /*
1548  * Remove se's load from this cfs_rq child load-average; if the entity is
1549  * transitioning to a blocked state we track its projected decay using
1550  * blocked_load_avg.
1551  */
1552 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
1553 						  struct sched_entity *se,
1554 						  int sleep)
1555 {
1556 	update_entity_load_avg(se, 1);
1557 	/* we force update consideration on load-balancer moves */
1558 	update_cfs_rq_blocked_load(cfs_rq, !sleep);
1559 
1560 	cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
1561 	if (sleep) {
1562 		cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
1563 		se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
1564 	} /* migrations, e.g. sleep=0 leave decay_count == 0 */
1565 }
1566 
1567 /*
1568  * Update the rq's load with the elapsed running time before entering
1569  * idle. If the last scheduled task is not a CFS task, idle_enter will
1570  * be the only way to update the runnable statistic.
1571  */
1572 void idle_enter_fair(struct rq *this_rq)
1573 {
1574 	update_rq_runnable_avg(this_rq, 1);
1575 }
1576 
1577 /*
1578  * Update the rq's load with the elapsed idle time before a task is
1579  * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
1580  * be the only way to update the runnable statistic.
1581  */
1582 void idle_exit_fair(struct rq *this_rq)
1583 {
1584 	update_rq_runnable_avg(this_rq, 0);
1585 }
1586 
1587 #else
1588 static inline void update_entity_load_avg(struct sched_entity *se,
1589 					  int update_cfs_rq) {}
1590 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
1591 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
1592 					   struct sched_entity *se,
1593 					   int wakeup) {}
1594 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
1595 					   struct sched_entity *se,
1596 					   int sleep) {}
1597 static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
1598 					      int force_update) {}
1599 #endif
1600 
1601 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
1602 {
1603 #ifdef CONFIG_SCHEDSTATS
1604 	struct task_struct *tsk = NULL;
1605 
1606 	if (entity_is_task(se))
1607 		tsk = task_of(se);
1608 
1609 	if (se->statistics.sleep_start) {
1610 		u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
1611 
1612 		if ((s64)delta < 0)
1613 			delta = 0;
1614 
1615 		if (unlikely(delta > se->statistics.sleep_max))
1616 			se->statistics.sleep_max = delta;
1617 
1618 		se->statistics.sleep_start = 0;
1619 		se->statistics.sum_sleep_runtime += delta;
1620 
1621 		if (tsk) {
1622 			account_scheduler_latency(tsk, delta >> 10, 1);
1623 			trace_sched_stat_sleep(tsk, delta);
1624 		}
1625 	}
1626 	if (se->statistics.block_start) {
1627 		u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
1628 
1629 		if ((s64)delta < 0)
1630 			delta = 0;
1631 
1632 		if (unlikely(delta > se->statistics.block_max))
1633 			se->statistics.block_max = delta;
1634 
1635 		se->statistics.block_start = 0;
1636 		se->statistics.sum_sleep_runtime += delta;
1637 
1638 		if (tsk) {
1639 			if (tsk->in_iowait) {
1640 				se->statistics.iowait_sum += delta;
1641 				se->statistics.iowait_count++;
1642 				trace_sched_stat_iowait(tsk, delta);
1643 			}
1644 
1645 			trace_sched_stat_blocked(tsk, delta);
1646 			trace_sched_blocked_reason(tsk);
1647 
1648 			/*
1649 			 * Blocking time is in units of nanosecs, so shift by
1650 			 * 20 to get a milliseconds-range estimation of the
1651 			 * amount of time that the task spent sleeping:
1652 			 */
1653 			if (unlikely(prof_on == SLEEP_PROFILING)) {
1654 				profile_hits(SLEEP_PROFILING,
1655 						(void *)get_wchan(tsk),
1656 						delta >> 20);
1657 			}
1658 			account_scheduler_latency(tsk, delta >> 10, 0);
1659 		}
1660 	}
1661 #endif
1662 }
1663 
1664 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
1665 {
1666 #ifdef CONFIG_SCHED_DEBUG
1667 	s64 d = se->vruntime - cfs_rq->min_vruntime;
1668 
1669 	if (d < 0)
1670 		d = -d;
1671 
1672 	if (d > 3*sysctl_sched_latency)
1673 		schedstat_inc(cfs_rq, nr_spread_over);
1674 #endif
1675 }
1676 
1677 static void
1678 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
1679 {
1680 	u64 vruntime = cfs_rq->min_vruntime;
1681 
1682 	/*
1683 	 * The 'current' period is already promised to the current tasks,
1684 	 * however the extra weight of the new task will slow them down a
1685 	 * little, place the new task so that it fits in the slot that
1686 	 * stays open at the end.
1687 	 */
1688 	if (initial && sched_feat(START_DEBIT))
1689 		vruntime += sched_vslice(cfs_rq, se);
1690 
1691 	/* sleeps up to a single latency don't count. */
1692 	if (!initial) {
1693 		unsigned long thresh = sysctl_sched_latency;
1694 
1695 		/*
1696 		 * Halve their sleep time's effect, to allow
1697 		 * for a gentler effect of sleepers:
1698 		 */
1699 		if (sched_feat(GENTLE_FAIR_SLEEPERS))
1700 			thresh >>= 1;
1701 
1702 		vruntime -= thresh;
1703 	}
1704 
1705 	/* ensure we never gain time by being placed backwards. */
1706 	se->vruntime = max_vruntime(se->vruntime, vruntime);
1707 }
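/*
 * Worked example (illustrative numbers only, assuming the default unscaled
 * sysctl_sched_latency of 6ms and GENTLE_FAIR_SLEEPERS enabled): with
 * min_vruntime at 100ms, a task waking with vruntime = 40ms after a long
 * sleep sees thresh = 6ms >> 1 = 3ms, giving a candidate position of
 * 100ms - 3ms = 97ms; max_vruntime(40ms, 97ms) = 97ms, so the sleeper gets
 * at most ~half a latency period of credit rather than the 60ms it "saved"
 * while sleeping.  A forked task with START_DEBIT instead starts one
 * vslice *after* min_vruntime.
 */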
1708 
1709 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
1710 
1711 static void
1712 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1713 {
1714 	/*
1715 	 * Update the normalized vruntime before updating min_vruntime
1716 	 * through calling update_curr().
1717 	 */
1718 	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
1719 		se->vruntime += cfs_rq->min_vruntime;
1720 
1721 	/*
1722 	 * Update run-time statistics of the 'current'.
1723 	 */
1724 	update_curr(cfs_rq);
1725 	enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
1726 	account_entity_enqueue(cfs_rq, se);
1727 	update_cfs_shares(cfs_rq);
1728 
1729 	if (flags & ENQUEUE_WAKEUP) {
1730 		place_entity(cfs_rq, se, 0);
1731 		enqueue_sleeper(cfs_rq, se);
1732 	}
1733 
1734 	update_stats_enqueue(cfs_rq, se);
1735 	check_spread(cfs_rq, se);
1736 	if (se != cfs_rq->curr)
1737 		__enqueue_entity(cfs_rq, se);
1738 	se->on_rq = 1;
1739 
1740 	if (cfs_rq->nr_running == 1) {
1741 		list_add_leaf_cfs_rq(cfs_rq);
1742 		check_enqueue_throttle(cfs_rq);
1743 	}
1744 }
1745 
1746 static void __clear_buddies_last(struct sched_entity *se)
1747 {
1748 	for_each_sched_entity(se) {
1749 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
1750 		if (cfs_rq->last == se)
1751 			cfs_rq->last = NULL;
1752 		else
1753 			break;
1754 	}
1755 }
1756 
1757 static void __clear_buddies_next(struct sched_entity *se)
1758 {
1759 	for_each_sched_entity(se) {
1760 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
1761 		if (cfs_rq->next == se)
1762 			cfs_rq->next = NULL;
1763 		else
1764 			break;
1765 	}
1766 }
1767 
1768 static void __clear_buddies_skip(struct sched_entity *se)
1769 {
1770 	for_each_sched_entity(se) {
1771 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
1772 		if (cfs_rq->skip == se)
1773 			cfs_rq->skip = NULL;
1774 		else
1775 			break;
1776 	}
1777 }
1778 
1779 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1780 {
1781 	if (cfs_rq->last == se)
1782 		__clear_buddies_last(se);
1783 
1784 	if (cfs_rq->next == se)
1785 		__clear_buddies_next(se);
1786 
1787 	if (cfs_rq->skip == se)
1788 		__clear_buddies_skip(se);
1789 }
1790 
1791 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1792 
1793 static void
1794 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1795 {
1796 	/*
1797 	 * Update run-time statistics of the 'current'.
1798 	 */
1799 	update_curr(cfs_rq);
1800 	dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
1801 
1802 	update_stats_dequeue(cfs_rq, se);
1803 	if (flags & DEQUEUE_SLEEP) {
1804 #ifdef CONFIG_SCHEDSTATS
1805 		if (entity_is_task(se)) {
1806 			struct task_struct *tsk = task_of(se);
1807 
1808 			if (tsk->state & TASK_INTERRUPTIBLE)
1809 				se->statistics.sleep_start = rq_of(cfs_rq)->clock;
1810 			if (tsk->state & TASK_UNINTERRUPTIBLE)
1811 				se->statistics.block_start = rq_of(cfs_rq)->clock;
1812 		}
1813 #endif
1814 	}
1815 
1816 	clear_buddies(cfs_rq, se);
1817 
1818 	if (se != cfs_rq->curr)
1819 		__dequeue_entity(cfs_rq, se);
1820 	se->on_rq = 0;
1821 	account_entity_dequeue(cfs_rq, se);
1822 
1823 	/*
1824 	 * Normalize the entity after updating the min_vruntime because the
1825 	 * update can refer to the ->curr item and we need to reflect this
1826 	 * movement in our normalized position.
1827 	 */
1828 	if (!(flags & DEQUEUE_SLEEP))
1829 		se->vruntime -= cfs_rq->min_vruntime;
1830 
1831 	/* return excess runtime on last dequeue */
1832 	return_cfs_rq_runtime(cfs_rq);
1833 
1834 	update_min_vruntime(cfs_rq);
1835 	update_cfs_shares(cfs_rq);
1836 }
1837 
1838 /*
1839  * Preempt the current task with a newly woken task if needed:
1840  */
1841 static void
1842 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
1843 {
1844 	unsigned long ideal_runtime, delta_exec;
1845 	struct sched_entity *se;
1846 	s64 delta;
1847 
1848 	ideal_runtime = sched_slice(cfs_rq, curr);
1849 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1850 	if (delta_exec > ideal_runtime) {
1851 		resched_task(rq_of(cfs_rq)->curr);
1852 		/*
1853 		 * The current task ran long enough, ensure it doesn't get
1854 		 * re-elected due to buddy favours.
1855 		 */
1856 		clear_buddies(cfs_rq, curr);
1857 		return;
1858 	}
1859 
1860 	/*
1861 	 * Ensure that a task that missed wakeup preemption by a
1862 	 * narrow margin doesn't have to wait for a full slice.
1863 	 * This also mitigates buddy induced latencies under load.
1864 	 */
1865 	if (delta_exec < sysctl_sched_min_granularity)
1866 		return;
1867 
1868 	se = __pick_first_entity(cfs_rq);
1869 	delta = curr->vruntime - se->vruntime;
1870 
1871 	if (delta < 0)
1872 		return;
1873 
1874 	if (delta > ideal_runtime)
1875 		resched_task(rq_of(cfs_rq)->curr);
1876 }
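/*
 * Illustrative numbers: with two runnable nice-0 tasks and the default 6ms
 * latency, sched_slice() yields an ideal_runtime of ~3ms each, so curr is
 * rescheduled once it has run 3ms past prev_sum_exec_runtime.  The second
 * test only applies after at least sysctl_sched_min_granularity (0.75ms by
 * default) has been consumed, and then preempts when curr's vruntime leads
 * the leftmost entity by more than ideal_runtime.
 */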
1877 
1878 static void
1879 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
1880 {
1881 	/* 'current' is not kept within the tree. */
1882 	if (se->on_rq) {
1883 		/*
1884 		 * Any task has to be enqueued before it gets to execute on
1885 		 * a CPU. So account for the time it spent waiting on the
1886 		 * runqueue.
1887 		 */
1888 		update_stats_wait_end(cfs_rq, se);
1889 		__dequeue_entity(cfs_rq, se);
1890 	}
1891 
1892 	update_stats_curr_start(cfs_rq, se);
1893 	cfs_rq->curr = se;
1894 #ifdef CONFIG_SCHEDSTATS
1895 	/*
1896 	 * Track our maximum slice length, if the CPU's load is at
1897 	 * least twice that of our own weight (i.e. don't track it
1898 	 * when there are only lesser-weight tasks around):
1899 	 */
1900 	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
1901 		se->statistics.slice_max = max(se->statistics.slice_max,
1902 			se->sum_exec_runtime - se->prev_sum_exec_runtime);
1903 	}
1904 #endif
1905 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
1906 }
1907 
1908 static int
1909 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
1910 
1911 /*
1912  * Pick the next process, keeping these things in mind, in this order:
1913  * 1) keep things fair between processes/task groups
1914  * 2) pick the "next" process, since someone really wants that to run
1915  * 3) pick the "last" process, for cache locality
1916  * 4) do not run the "skip" process, if something else is available
1917  */
1918 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
1919 {
1920 	struct sched_entity *se = __pick_first_entity(cfs_rq);
1921 	struct sched_entity *left = se;
1922 
1923 	/*
1924 	 * Avoid running the skip buddy, if running something else can
1925 	 * be done without getting too unfair.
1926 	 */
1927 	if (cfs_rq->skip == se) {
1928 		struct sched_entity *second = __pick_next_entity(se);
1929 		if (second && wakeup_preempt_entity(second, left) < 1)
1930 			se = second;
1931 	}
1932 
1933 	/*
1934 	 * Prefer last buddy, try to return the CPU to a preempted task.
1935 	 */
1936 	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
1937 		se = cfs_rq->last;
1938 
1939 	/*
1940 	 * Someone really wants this to run. If it's not unfair, run it.
1941 	 */
1942 	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
1943 		se = cfs_rq->next;
1944 
1945 	clear_buddies(cfs_rq, se);
1946 
1947 	return se;
1948 }
1949 
1950 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1951 
1952 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
1953 {
1954 	/*
1955 	 * If still on the runqueue then deactivate_task()
1956 	 * was not called and update_curr() has to be done:
1957 	 */
1958 	if (prev->on_rq)
1959 		update_curr(cfs_rq);
1960 
1961 	/* throttle cfs_rqs exceeding runtime */
1962 	check_cfs_rq_runtime(cfs_rq);
1963 
1964 	check_spread(cfs_rq, prev);
1965 	if (prev->on_rq) {
1966 		update_stats_wait_start(cfs_rq, prev);
1967 		/* Put 'current' back into the tree. */
1968 		__enqueue_entity(cfs_rq, prev);
1969 		/* in !on_rq case, update occurred at dequeue */
1970 		update_entity_load_avg(prev, 1);
1971 	}
1972 	cfs_rq->curr = NULL;
1973 }
1974 
1975 static void
1976 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
1977 {
1978 	/*
1979 	 * Update run-time statistics of the 'current'.
1980 	 */
1981 	update_curr(cfs_rq);
1982 
1983 	/*
1984 	 * Ensure that runnable average is periodically updated.
1985 	 */
1986 	update_entity_load_avg(curr, 1);
1987 	update_cfs_rq_blocked_load(cfs_rq, 1);
1988 
1989 #ifdef CONFIG_SCHED_HRTICK
1990 	/*
1991 	 * queued ticks are scheduled to match the slice, so don't bother
1992 	 * validating it and just reschedule.
1993 	 */
1994 	if (queued) {
1995 		resched_task(rq_of(cfs_rq)->curr);
1996 		return;
1997 	}
1998 	/*
1999 	 * don't let the period tick interfere with the hrtick preemption
2000 	 */
2001 	if (!sched_feat(DOUBLE_TICK) &&
2002 			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
2003 		return;
2004 #endif
2005 
2006 	if (cfs_rq->nr_running > 1)
2007 		check_preempt_tick(cfs_rq, curr);
2008 }
2009 
2010 
2011 /**************************************************
2012  * CFS bandwidth control machinery
2013  */
2014 
2015 #ifdef CONFIG_CFS_BANDWIDTH
2016 
2017 #ifdef HAVE_JUMP_LABEL
2018 static struct static_key __cfs_bandwidth_used;
2019 
2020 static inline bool cfs_bandwidth_used(void)
2021 {
2022 	return static_key_false(&__cfs_bandwidth_used);
2023 }
2024 
2025 void account_cfs_bandwidth_used(int enabled, int was_enabled)
2026 {
2027 	/* only need to count groups transitioning between enabled/!enabled */
2028 	if (enabled && !was_enabled)
2029 		static_key_slow_inc(&__cfs_bandwidth_used);
2030 	else if (!enabled && was_enabled)
2031 		static_key_slow_dec(&__cfs_bandwidth_used);
2032 }
2033 #else /* HAVE_JUMP_LABEL */
2034 static bool cfs_bandwidth_used(void)
2035 {
2036 	return true;
2037 }
2038 
2039 void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
2040 #endif /* HAVE_JUMP_LABEL */
2041 
2042 /*
2043  * default period for cfs group bandwidth.
2044  * default: 0.1s, units: nanoseconds
2045  */
2046 static inline u64 default_cfs_period(void)
2047 {
2048 	return 100000000ULL;
2049 }
2050 
2051 static inline u64 sched_cfs_bandwidth_slice(void)
2052 {
2053 	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
2054 }
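/*
 * Example configuration (illustrative): cpu.cfs_period_us = 100000 with
 * cpu.cfs_quota_us = 25000 gives the group 25ms of runtime per 100ms
 * period, i.e. 25% of one CPU spread across the machine.  Each per-cpu
 * cfs_rq then draws from that global pool in slice-sized chunks
 * (sysctl_sched_cfs_bandwidth_slice, 5ms by default) so that cfs_b->lock
 * is not taken on every accounting update.
 */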
2055 
2056 /*
2057  * Replenish runtime according to assigned quota and update expiration time.
2058  * We use sched_clock_cpu directly instead of rq->clock to avoid adding
2059  * additional synchronization around rq->lock.
2060  *
2061  * requires cfs_b->lock
2062  */
2063 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
2064 {
2065 	u64 now;
2066 
2067 	if (cfs_b->quota == RUNTIME_INF)
2068 		return;
2069 
2070 	now = sched_clock_cpu(smp_processor_id());
2071 	cfs_b->runtime = cfs_b->quota;
2072 	cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2073 }
2074 
2075 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2076 {
2077 	return &tg->cfs_bandwidth;
2078 }
2079 
2080 /* rq->clock_task normalized against any time this cfs_rq has spent throttled */
2081 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2082 {
2083 	if (unlikely(cfs_rq->throttle_count))
2084 		return cfs_rq->throttled_clock_task;
2085 
2086 	return rq_of(cfs_rq)->clock_task - cfs_rq->throttled_clock_task_time;
2087 }
2088 
2089 /* returns 0 on failure to allocate runtime */
2090 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2091 {
2092 	struct task_group *tg = cfs_rq->tg;
2093 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
2094 	u64 amount = 0, min_amount, expires;
2095 
2096 	/* note: this is a positive sum as runtime_remaining <= 0 */
2097 	min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2098 
2099 	raw_spin_lock(&cfs_b->lock);
2100 	if (cfs_b->quota == RUNTIME_INF)
2101 		amount = min_amount;
2102 	else {
2103 		/*
2104 		 * If the bandwidth pool has become inactive, then at least one
2105 		 * period must have elapsed since the last consumption.
2106 		 * Refresh the global state and ensure bandwidth timer becomes
2107 		 * active.
2108 		 */
2109 		if (!cfs_b->timer_active) {
2110 			__refill_cfs_bandwidth_runtime(cfs_b);
2111 			__start_cfs_bandwidth(cfs_b);
2112 		}
2113 
2114 		if (cfs_b->runtime > 0) {
2115 			amount = min(cfs_b->runtime, min_amount);
2116 			cfs_b->runtime -= amount;
2117 			cfs_b->idle = 0;
2118 		}
2119 	}
2120 	expires = cfs_b->runtime_expires;
2121 	raw_spin_unlock(&cfs_b->lock);
2122 
2123 	cfs_rq->runtime_remaining += amount;
2124 	/*
2125 	 * we may have advanced our local expiration to account for allowed
2126 	 * spread between our sched_clock and the one on which runtime was
2127 	 * issued.
2128 	 */
2129 	if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2130 		cfs_rq->runtime_expires = expires;
2131 
2132 	return cfs_rq->runtime_remaining > 0;
2133 }
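/*
 * Illustrative accounting: if a cfs_rq has overrun its local pool so that
 * runtime_remaining = -2ms, then with a 5ms slice min_amount becomes
 * 5ms - (-2ms) = 7ms, and a successful grab leaves runtime_remaining at
 * +5ms once the debt is repaid.  If the global pool cannot cover the debt
 * the function returns 0 and the caller reschedules so the hierarchy can
 * be throttled.
 */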
2134 
2135 /*
2136  * Note: This depends on the synchronization provided by sched_clock and the
2137  * fact that rq->clock snapshots this value.
2138  */
2139 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2140 {
2141 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2142 	struct rq *rq = rq_of(cfs_rq);
2143 
2144 	/* if the deadline is ahead of our clock, nothing to do */
2145 	if (likely((s64)(rq->clock - cfs_rq->runtime_expires) < 0))
2146 		return;
2147 
2148 	if (cfs_rq->runtime_remaining < 0)
2149 		return;
2150 
2151 	/*
2152 	 * If the local deadline has passed we have to consider the
2153 	 * possibility that our sched_clock is 'fast' and the global deadline
2154 	 * has not truly expired.
2155 	 *
2156 	 * Fortunately we can determine whether this is the case by checking
2157 	 * whether the global deadline has advanced.
2158 	 */
2159 
2160 	if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
2161 		/* extend local deadline, drift is bounded above by 2 ticks */
2162 		cfs_rq->runtime_expires += TICK_NSEC;
2163 	} else {
2164 		/* global deadline is ahead, expiration has passed */
2165 		cfs_rq->runtime_remaining = 0;
2166 	}
2167 }
2168 
2169 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2170 				     unsigned long delta_exec)
2171 {
2172 	/* dock delta_exec before expiring quota (as it could span periods) */
2173 	cfs_rq->runtime_remaining -= delta_exec;
2174 	expire_cfs_rq_runtime(cfs_rq);
2175 
2176 	if (likely(cfs_rq->runtime_remaining > 0))
2177 		return;
2178 
2179 	/*
2180 	 * if we're unable to extend our runtime we resched so that the active
2181 	 * hierarchy can be throttled
2182 	 */
2183 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
2184 		resched_task(rq_of(cfs_rq)->curr);
2185 }
2186 
2187 static __always_inline
2188 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
2189 {
2190 	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
2191 		return;
2192 
2193 	__account_cfs_rq_runtime(cfs_rq, delta_exec);
2194 }
2195 
2196 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2197 {
2198 	return cfs_bandwidth_used() && cfs_rq->throttled;
2199 }
2200 
2201 /* check whether cfs_rq, or any parent, is throttled */
2202 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2203 {
2204 	return cfs_bandwidth_used() && cfs_rq->throttle_count;
2205 }
2206 
2207 /*
2208  * Ensure that neither of the group entities corresponding to src_cpu or
2209  * dest_cpu are members of a throttled hierarchy when performing group
2210  * load-balance operations.
2211  */
2212 static inline int throttled_lb_pair(struct task_group *tg,
2213 				    int src_cpu, int dest_cpu)
2214 {
2215 	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
2216 
2217 	src_cfs_rq = tg->cfs_rq[src_cpu];
2218 	dest_cfs_rq = tg->cfs_rq[dest_cpu];
2219 
2220 	return throttled_hierarchy(src_cfs_rq) ||
2221 	       throttled_hierarchy(dest_cfs_rq);
2222 }
2223 
2224 /* updated child weight may affect parent so we have to do this bottom up */
2225 static int tg_unthrottle_up(struct task_group *tg, void *data)
2226 {
2227 	struct rq *rq = data;
2228 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2229 
2230 	cfs_rq->throttle_count--;
2231 #ifdef CONFIG_SMP
2232 	if (!cfs_rq->throttle_count) {
2233 		/* adjust cfs_rq_clock_task() */
2234 		cfs_rq->throttled_clock_task_time += rq->clock_task -
2235 					     cfs_rq->throttled_clock_task;
2236 	}
2237 #endif
2238 
2239 	return 0;
2240 }
2241 
2242 static int tg_throttle_down(struct task_group *tg, void *data)
2243 {
2244 	struct rq *rq = data;
2245 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2246 
2247 	/* group is entering throttled state, stop time */
2248 	if (!cfs_rq->throttle_count)
2249 		cfs_rq->throttled_clock_task = rq->clock_task;
2250 	cfs_rq->throttle_count++;
2251 
2252 	return 0;
2253 }
2254 
2255 static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
2256 {
2257 	struct rq *rq = rq_of(cfs_rq);
2258 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2259 	struct sched_entity *se;
2260 	long task_delta, dequeue = 1;
2261 
2262 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
2263 
2264 	/* freeze hierarchy runnable averages while throttled */
2265 	rcu_read_lock();
2266 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
2267 	rcu_read_unlock();
2268 
2269 	task_delta = cfs_rq->h_nr_running;
2270 	for_each_sched_entity(se) {
2271 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
2272 		/* throttled entity or throttle-on-deactivate */
2273 		if (!se->on_rq)
2274 			break;
2275 
2276 		if (dequeue)
2277 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
2278 		qcfs_rq->h_nr_running -= task_delta;
2279 
2280 		if (qcfs_rq->load.weight)
2281 			dequeue = 0;
2282 	}
2283 
2284 	if (!se)
2285 		rq->nr_running -= task_delta;
2286 
2287 	cfs_rq->throttled = 1;
2288 	cfs_rq->throttled_clock = rq->clock;
2289 	raw_spin_lock(&cfs_b->lock);
2290 	list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
2291 	raw_spin_unlock(&cfs_b->lock);
2292 }
2293 
2294 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
2295 {
2296 	struct rq *rq = rq_of(cfs_rq);
2297 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2298 	struct sched_entity *se;
2299 	int enqueue = 1;
2300 	long task_delta;
2301 
2302 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
2303 
2304 	cfs_rq->throttled = 0;
2305 	raw_spin_lock(&cfs_b->lock);
2306 	cfs_b->throttled_time += rq->clock - cfs_rq->throttled_clock;
2307 	list_del_rcu(&cfs_rq->throttled_list);
2308 	raw_spin_unlock(&cfs_b->lock);
2309 
2310 	update_rq_clock(rq);
2311 	/* update hierarchical throttle state */
2312 	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
2313 
2314 	if (!cfs_rq->load.weight)
2315 		return;
2316 
2317 	task_delta = cfs_rq->h_nr_running;
2318 	for_each_sched_entity(se) {
2319 		if (se->on_rq)
2320 			enqueue = 0;
2321 
2322 		cfs_rq = cfs_rq_of(se);
2323 		if (enqueue)
2324 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
2325 		cfs_rq->h_nr_running += task_delta;
2326 
2327 		if (cfs_rq_throttled(cfs_rq))
2328 			break;
2329 	}
2330 
2331 	if (!se)
2332 		rq->nr_running += task_delta;
2333 
2334 	/* determine whether we need to wake up potentially idle cpu */
2335 	if (rq->curr == rq->idle && rq->cfs.nr_running)
2336 		resched_task(rq->curr);
2337 }
2338 
2339 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
2340 		u64 remaining, u64 expires)
2341 {
2342 	struct cfs_rq *cfs_rq;
2343 	u64 runtime = remaining;
2344 
2345 	rcu_read_lock();
2346 	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
2347 				throttled_list) {
2348 		struct rq *rq = rq_of(cfs_rq);
2349 
2350 		raw_spin_lock(&rq->lock);
2351 		if (!cfs_rq_throttled(cfs_rq))
2352 			goto next;
2353 
2354 		runtime = -cfs_rq->runtime_remaining + 1;
2355 		if (runtime > remaining)
2356 			runtime = remaining;
2357 		remaining -= runtime;
2358 
2359 		cfs_rq->runtime_remaining += runtime;
2360 		cfs_rq->runtime_expires = expires;
2361 
2362 		/* we check whether we're throttled above */
2363 		if (cfs_rq->runtime_remaining > 0)
2364 			unthrottle_cfs_rq(cfs_rq);
2365 
2366 next:
2367 		raw_spin_unlock(&rq->lock);
2368 
2369 		if (!remaining)
2370 			break;
2371 	}
2372 	rcu_read_unlock();
2373 
2374 	return remaining;
2375 }
2376 
2377 /*
2378  * Responsible for refilling a task_group's bandwidth and unthrottling its
2379  * cfs_rqs as appropriate. If there has been no activity within the last
2380  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
2381  * used to track this state.
2382  */
2383 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
2384 {
2385 	u64 runtime, runtime_expires;
2386 	int idle = 1, throttled;
2387 
2388 	raw_spin_lock(&cfs_b->lock);
2389 	/* no need to continue the timer with no bandwidth constraint */
2390 	if (cfs_b->quota == RUNTIME_INF)
2391 		goto out_unlock;
2392 
2393 	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2394 	/* idle depends on !throttled (for the case of a large deficit) */
2395 	idle = cfs_b->idle && !throttled;
2396 	cfs_b->nr_periods += overrun;
2397 
2398 	/* if we're going inactive then everything else can be deferred */
2399 	if (idle)
2400 		goto out_unlock;
2401 
2402 	__refill_cfs_bandwidth_runtime(cfs_b);
2403 
2404 	if (!throttled) {
2405 		/* mark as potentially idle for the upcoming period */
2406 		cfs_b->idle = 1;
2407 		goto out_unlock;
2408 	}
2409 
2410 	/* account preceding periods in which throttling occurred */
2411 	cfs_b->nr_throttled += overrun;
2412 
2413 	/*
2414 	 * There are throttled entities so we must first use the new bandwidth
2415 	 * to unthrottle them before making it generally available.  This
2416 	 * ensures that all existing debts will be paid before a new cfs_rq is
2417 	 * allowed to run.
2418 	 */
2419 	runtime = cfs_b->runtime;
2420 	runtime_expires = cfs_b->runtime_expires;
2421 	cfs_b->runtime = 0;
2422 
2423 	/*
2424 	 * This check is repeated as we are holding onto the new bandwidth
2425 	 * while we unthrottle.  This can potentially race with an unthrottled
2426 	 * group trying to acquire new bandwidth from the global pool.
2427 	 */
2428 	while (throttled && runtime > 0) {
2429 		raw_spin_unlock(&cfs_b->lock);
2430 		/* we can't nest cfs_b->lock while distributing bandwidth */
2431 		runtime = distribute_cfs_runtime(cfs_b, runtime,
2432 						 runtime_expires);
2433 		raw_spin_lock(&cfs_b->lock);
2434 
2435 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2436 	}
2437 
2438 	/* return (any) remaining runtime */
2439 	cfs_b->runtime = runtime;
2440 	/*
2441 	 * While we are ensured activity in the period following an
2442 	 * unthrottle, this also covers the case in which the new bandwidth is
2443 	 * insufficient to cover the existing bandwidth deficit.  (Forcing the
2444 	 * timer to remain active while there are any throttled entities.)
2445 	 */
2446 	cfs_b->idle = 0;
2447 out_unlock:
2448 	if (idle)
2449 		cfs_b->timer_active = 0;
2450 	raw_spin_unlock(&cfs_b->lock);
2451 
2452 	return idle;
2453 }
2454 
2455 /* a cfs_rq won't donate quota below this amount */
2456 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
2457 /* minimum remaining period time to redistribute slack quota */
2458 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
2459 /* how long we wait to gather additional slack before distributing */
2460 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
2461 
2462 /* are we near the end of the current quota period? */
2463 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
2464 {
2465 	struct hrtimer *refresh_timer = &cfs_b->period_timer;
2466 	u64 remaining;
2467 
2468 	/* if the call-back is running a quota refresh is already occurring */
2469 	if (hrtimer_callback_running(refresh_timer))
2470 		return 1;
2471 
2472 	/* is a quota refresh about to occur? */
2473 	remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
2474 	if (remaining < min_expire)
2475 		return 1;
2476 
2477 	return 0;
2478 }
2479 
2480 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
2481 {
2482 	u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
2483 
2484 	/* if there's a quota refresh soon don't bother with slack */
2485 	if (runtime_refresh_within(cfs_b, min_left))
2486 		return;
2487 
2488 	start_bandwidth_timer(&cfs_b->slack_timer,
2489 				ns_to_ktime(cfs_bandwidth_slack_period));
2490 }
2491 
2492 /* we know any runtime found here is valid as update_curr() precedes return */
2493 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2494 {
2495 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2496 	s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
2497 
2498 	if (slack_runtime <= 0)
2499 		return;
2500 
2501 	raw_spin_lock(&cfs_b->lock);
2502 	if (cfs_b->quota != RUNTIME_INF &&
2503 	    cfs_rq->runtime_expires == cfs_b->runtime_expires) {
2504 		cfs_b->runtime += slack_runtime;
2505 
2506 		/* we are under rq->lock, defer unthrottling using a timer */
2507 		if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
2508 		    !list_empty(&cfs_b->throttled_cfs_rq))
2509 			start_cfs_slack_bandwidth(cfs_b);
2510 	}
2511 	raw_spin_unlock(&cfs_b->lock);
2512 
2513 	/* even if it's not valid for return we don't want to try again */
2514 	cfs_rq->runtime_remaining -= slack_runtime;
2515 }
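/*
 * Illustrative: a cfs_rq left with 3ms of local runtime when its last task
 * dequeues keeps min_cfs_rq_runtime (1ms) and returns the remaining 2ms to
 * the global pool.  The slack timer is only armed once the pool holds more
 * than one slice while throttled children exist, and not when the regular
 * period refresh is due within roughly min_bandwidth_expiration +
 * cfs_bandwidth_slack_period anyway.
 */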
2516 
2517 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2518 {
2519 	if (!cfs_bandwidth_used())
2520 		return;
2521 
2522 	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
2523 		return;
2524 
2525 	__return_cfs_rq_runtime(cfs_rq);
2526 }
2527 
2528 /*
2529  * This is done with a timer (instead of inline with bandwidth return) since
2530  * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
2531  */
2532 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
2533 {
2534 	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
2535 	u64 expires;
2536 
2537 	/* confirm we're still not at a refresh boundary */
2538 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
2539 		return;
2540 
2541 	raw_spin_lock(&cfs_b->lock);
2542 	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
2543 		runtime = cfs_b->runtime;
2544 		cfs_b->runtime = 0;
2545 	}
2546 	expires = cfs_b->runtime_expires;
2547 	raw_spin_unlock(&cfs_b->lock);
2548 
2549 	if (!runtime)
2550 		return;
2551 
2552 	runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
2553 
2554 	raw_spin_lock(&cfs_b->lock);
2555 	if (expires == cfs_b->runtime_expires)
2556 		cfs_b->runtime = runtime;
2557 	raw_spin_unlock(&cfs_b->lock);
2558 }
2559 
2560 /*
2561  * When a group wakes up we want to make sure that its quota is not already
2562  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
2563  * runtime as update_curr() throttling cannot trigger until it's on-rq.
2564  */
2565 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
2566 {
2567 	if (!cfs_bandwidth_used())
2568 		return;
2569 
2570 	/* an active group must be handled by the update_curr()->put() path */
2571 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
2572 		return;
2573 
2574 	/* ensure the group is not already throttled */
2575 	if (cfs_rq_throttled(cfs_rq))
2576 		return;
2577 
2578 	/* update runtime allocation */
2579 	account_cfs_rq_runtime(cfs_rq, 0);
2580 	if (cfs_rq->runtime_remaining <= 0)
2581 		throttle_cfs_rq(cfs_rq);
2582 }
2583 
2584 /* conditionally throttle active cfs_rq's from put_prev_entity() */
2585 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2586 {
2587 	if (!cfs_bandwidth_used())
2588 		return;
2589 
2590 	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
2591 		return;
2592 
2593 	/*
2594 	 * it's possible for a throttled entity to be forced into a running
2595 	 * state (e.g. set_curr_task); in this case we're finished.
2596 	 */
2597 	if (cfs_rq_throttled(cfs_rq))
2598 		return;
2599 
2600 	throttle_cfs_rq(cfs_rq);
2601 }
2602 
2603 static inline u64 default_cfs_period(void);
2604 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun);
2605 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b);
2606 
2607 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
2608 {
2609 	struct cfs_bandwidth *cfs_b =
2610 		container_of(timer, struct cfs_bandwidth, slack_timer);
2611 	do_sched_cfs_slack_timer(cfs_b);
2612 
2613 	return HRTIMER_NORESTART;
2614 }
2615 
2616 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2617 {
2618 	struct cfs_bandwidth *cfs_b =
2619 		container_of(timer, struct cfs_bandwidth, period_timer);
2620 	ktime_t now;
2621 	int overrun;
2622 	int idle = 0;
2623 
2624 	for (;;) {
2625 		now = hrtimer_cb_get_time(timer);
2626 		overrun = hrtimer_forward(timer, now, cfs_b->period);
2627 
2628 		if (!overrun)
2629 			break;
2630 
2631 		idle = do_sched_cfs_period_timer(cfs_b, overrun);
2632 	}
2633 
2634 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
2635 }
2636 
2637 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2638 {
2639 	raw_spin_lock_init(&cfs_b->lock);
2640 	cfs_b->runtime = 0;
2641 	cfs_b->quota = RUNTIME_INF;
2642 	cfs_b->period = ns_to_ktime(default_cfs_period());
2643 
2644 	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
2645 	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2646 	cfs_b->period_timer.function = sched_cfs_period_timer;
2647 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2648 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
2649 }
2650 
2651 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2652 {
2653 	cfs_rq->runtime_enabled = 0;
2654 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
2655 }
2656 
2657 /* requires cfs_b->lock, may release to reprogram timer */
2658 void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2659 {
2660 	/*
2661 	 * The timer may be active because we're trying to set a new bandwidth
2662 	 * period or because we're racing with the tear-down path
2663 	 * (timer_active==0 becomes visible before the hrtimer call-back
2664 	 * terminates).  In either case we ensure that it's re-programmed
2665 	 */
2666 	while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
2667 		raw_spin_unlock(&cfs_b->lock);
2668 		/* ensure cfs_b->lock is available while we wait */
2669 		hrtimer_cancel(&cfs_b->period_timer);
2670 
2671 		raw_spin_lock(&cfs_b->lock);
2672 		/* if someone else restarted the timer then we're done */
2673 		if (cfs_b->timer_active)
2674 			return;
2675 	}
2676 
2677 	cfs_b->timer_active = 1;
2678 	start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
2679 }
2680 
2681 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2682 {
2683 	hrtimer_cancel(&cfs_b->period_timer);
2684 	hrtimer_cancel(&cfs_b->slack_timer);
2685 }
2686 
2687 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
2688 {
2689 	struct cfs_rq *cfs_rq;
2690 
2691 	for_each_leaf_cfs_rq(rq, cfs_rq) {
2692 		struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2693 
2694 		if (!cfs_rq->runtime_enabled)
2695 			continue;
2696 
2697 		/*
2698 		 * clock_task is not advancing so we just need to make sure
2699 		 * there's some valid quota amount
2700 		 */
2701 		cfs_rq->runtime_remaining = cfs_b->quota;
2702 		if (cfs_rq_throttled(cfs_rq))
2703 			unthrottle_cfs_rq(cfs_rq);
2704 	}
2705 }
2706 
2707 #else /* CONFIG_CFS_BANDWIDTH */
2708 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2709 {
2710 	return rq_of(cfs_rq)->clock_task;
2711 }
2712 
2713 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2714 				     unsigned long delta_exec) {}
2715 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2716 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
2717 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2718 
2719 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2720 {
2721 	return 0;
2722 }
2723 
2724 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2725 {
2726 	return 0;
2727 }
2728 
2729 static inline int throttled_lb_pair(struct task_group *tg,
2730 				    int src_cpu, int dest_cpu)
2731 {
2732 	return 0;
2733 }
2734 
2735 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2736 
2737 #ifdef CONFIG_FAIR_GROUP_SCHED
2738 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2739 #endif
2740 
2741 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2742 {
2743 	return NULL;
2744 }
2745 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2746 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
2747 
2748 #endif /* CONFIG_CFS_BANDWIDTH */
2749 
2750 /**************************************************
2751  * CFS operations on tasks:
2752  */
2753 
2754 #ifdef CONFIG_SCHED_HRTICK
2755 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
2756 {
2757 	struct sched_entity *se = &p->se;
2758 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
2759 
2760 	WARN_ON(task_rq(p) != rq);
2761 
2762 	if (cfs_rq->nr_running > 1) {
2763 		u64 slice = sched_slice(cfs_rq, se);
2764 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
2765 		s64 delta = slice - ran;
2766 
2767 		if (delta < 0) {
2768 			if (rq->curr == p)
2769 				resched_task(p);
2770 			return;
2771 		}
2772 
2773 		/*
2774 		 * Don't schedule slices shorter than 10000ns, that just
2775 		 * doesn't make sense. Rely on vruntime for fairness.
2776 		 */
2777 		if (rq->curr != p)
2778 			delta = max_t(s64, 10000LL, delta);
2779 
2780 		hrtick_start(rq, delta);
2781 	}
2782 }
2783 
2784 /*
2785  * called from enqueue/dequeue and updates the hrtick when the
2786  * current task is from our class and nr_running is low enough
2787  * to matter.
2788  */
2789 static void hrtick_update(struct rq *rq)
2790 {
2791 	struct task_struct *curr = rq->curr;
2792 
2793 	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
2794 		return;
2795 
2796 	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
2797 		hrtick_start_fair(rq, curr);
2798 }
2799 #else /* !CONFIG_SCHED_HRTICK */
2800 static inline void
2801 hrtick_start_fair(struct rq *rq, struct task_struct *p)
2802 {
2803 }
2804 
2805 static inline void hrtick_update(struct rq *rq)
2806 {
2807 }
2808 #endif
2809 
2810 /*
2811  * The enqueue_task method is called before nr_running is
2812  * increased. Here we update the fair scheduling stats and
2813  * then put the task into the rbtree:
2814  */
2815 static void
2816 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
2817 {
2818 	struct cfs_rq *cfs_rq;
2819 	struct sched_entity *se = &p->se;
2820 
2821 	for_each_sched_entity(se) {
2822 		if (se->on_rq)
2823 			break;
2824 		cfs_rq = cfs_rq_of(se);
2825 		enqueue_entity(cfs_rq, se, flags);
2826 
2827 		/*
2828 		 * end evaluation on encountering a throttled cfs_rq
2829 		 *
2830 		 * note: in the case of encountering a throttled cfs_rq we will
2831 		 * post the final h_nr_running increment below.
2832 		*/
2833 		if (cfs_rq_throttled(cfs_rq))
2834 			break;
2835 		cfs_rq->h_nr_running++;
2836 
2837 		flags = ENQUEUE_WAKEUP;
2838 	}
2839 
2840 	for_each_sched_entity(se) {
2841 		cfs_rq = cfs_rq_of(se);
2842 		cfs_rq->h_nr_running++;
2843 
2844 		if (cfs_rq_throttled(cfs_rq))
2845 			break;
2846 
2847 		update_cfs_shares(cfs_rq);
2848 		update_entity_load_avg(se, 1);
2849 	}
2850 
2851 	if (!se) {
2852 		update_rq_runnable_avg(rq, rq->nr_running);
2853 		inc_nr_running(rq);
2854 	}
2855 	hrtick_update(rq);
2856 }
2857 
2858 static void set_next_buddy(struct sched_entity *se);
2859 
2860 /*
2861  * The dequeue_task method is called before nr_running is
2862  * decreased. We remove the task from the rbtree and
2863  * update the fair scheduling stats:
2864  */
2865 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
2866 {
2867 	struct cfs_rq *cfs_rq;
2868 	struct sched_entity *se = &p->se;
2869 	int task_sleep = flags & DEQUEUE_SLEEP;
2870 
2871 	for_each_sched_entity(se) {
2872 		cfs_rq = cfs_rq_of(se);
2873 		dequeue_entity(cfs_rq, se, flags);
2874 
2875 		/*
2876 		 * end evaluation on encountering a throttled cfs_rq
2877 		 *
2878 		 * note: in the case of encountering a throttled cfs_rq we will
2879 		 * post the final h_nr_running decrement below.
2880 		*/
2881 		if (cfs_rq_throttled(cfs_rq))
2882 			break;
2883 		cfs_rq->h_nr_running--;
2884 
2885 		/* Don't dequeue parent if it has other entities besides us */
2886 		if (cfs_rq->load.weight) {
2887 			/*
2888 			 * Bias pick_next to pick a task from this cfs_rq, as
2889 			 * p is sleeping when it is within its sched_slice.
2890 			 */
2891 			if (task_sleep && parent_entity(se))
2892 				set_next_buddy(parent_entity(se));
2893 
2894 			/* avoid re-evaluating load for this entity */
2895 			se = parent_entity(se);
2896 			break;
2897 		}
2898 		flags |= DEQUEUE_SLEEP;
2899 	}
2900 
2901 	for_each_sched_entity(se) {
2902 		cfs_rq = cfs_rq_of(se);
2903 		cfs_rq->h_nr_running--;
2904 
2905 		if (cfs_rq_throttled(cfs_rq))
2906 			break;
2907 
2908 		update_cfs_shares(cfs_rq);
2909 		update_entity_load_avg(se, 1);
2910 	}
2911 
2912 	if (!se) {
2913 		dec_nr_running(rq);
2914 		update_rq_runnable_avg(rq, 1);
2915 	}
2916 	hrtick_update(rq);
2917 }
2918 
2919 #ifdef CONFIG_SMP
2920 /* Used instead of source_load when we know the type == 0 */
2921 static unsigned long weighted_cpuload(const int cpu)
2922 {
2923 	return cpu_rq(cpu)->load.weight;
2924 }
2925 
2926 /*
2927  * Return a low guess at the load of a migration-source cpu weighted
2928  * according to the scheduling class and "nice" value.
2929  *
2930  * We want to under-estimate the load of migration sources, to
2931  * balance conservatively.
2932  */
2933 static unsigned long source_load(int cpu, int type)
2934 {
2935 	struct rq *rq = cpu_rq(cpu);
2936 	unsigned long total = weighted_cpuload(cpu);
2937 
2938 	if (type == 0 || !sched_feat(LB_BIAS))
2939 		return total;
2940 
2941 	return min(rq->cpu_load[type-1], total);
2942 }
2943 
2944 /*
2945  * Return a high guess at the load of a migration-target cpu weighted
2946  * according to the scheduling class and "nice" value.
2947  */
2948 static unsigned long target_load(int cpu, int type)
2949 {
2950 	struct rq *rq = cpu_rq(cpu);
2951 	unsigned long total = weighted_cpuload(cpu);
2952 
2953 	if (type == 0 || !sched_feat(LB_BIAS))
2954 		return total;
2955 
2956 	return max(rq->cpu_load[type-1], total);
2957 }
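/*
 * Note: source_load()/target_load() deliberately bracket the truth.  With
 * LB_BIAS enabled and, say, cpu_load[type-1] = 2048 while the instantaneous
 * weighted_cpuload() is 1024, a migration source reports the smaller 1024
 * and a migration target reports the larger 2048, so tasks are only moved
 * when the imbalance survives this pessimistic view.
 */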
2958 
2959 static unsigned long power_of(int cpu)
2960 {
2961 	return cpu_rq(cpu)->cpu_power;
2962 }
2963 
2964 static unsigned long cpu_avg_load_per_task(int cpu)
2965 {
2966 	struct rq *rq = cpu_rq(cpu);
2967 	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
2968 
2969 	if (nr_running)
2970 		return rq->load.weight / nr_running;
2971 
2972 	return 0;
2973 }
2974 
2975 
2976 static void task_waking_fair(struct task_struct *p)
2977 {
2978 	struct sched_entity *se = &p->se;
2979 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
2980 	u64 min_vruntime;
2981 
2982 #ifndef CONFIG_64BIT
2983 	u64 min_vruntime_copy;
2984 
2985 	do {
2986 		min_vruntime_copy = cfs_rq->min_vruntime_copy;
2987 		smp_rmb();
2988 		min_vruntime = cfs_rq->min_vruntime;
2989 	} while (min_vruntime != min_vruntime_copy);
2990 #else
2991 	min_vruntime = cfs_rq->min_vruntime;
2992 #endif
2993 
2994 	se->vruntime -= min_vruntime;
2995 }
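/*
 * Note: the !CONFIG_64BIT retry loop above pairs with the writer side
 * (update_min_vruntime() earlier in this file), which publishes
 * min_vruntime and then min_vruntime_copy.  Re-reading until both copies
 * agree yields a tear-free 64-bit snapshot on 32-bit architectures without
 * taking rq->lock on the wakeup path.
 */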
2996 
2997 #ifdef CONFIG_FAIR_GROUP_SCHED
2998 /*
2999  * effective_load() calculates the load change as seen from the root_task_group
3000  *
3001  * Adding load to a group doesn't make a group heavier, but can cause movement
3002  * of group shares between cpus. Assuming the shares were perfectly aligned one
3003  * can calculate the shift in shares.
3004  *
3005  * Calculate the effective load difference if @wl is added (subtracted) to @tg
3006  * on this @cpu and results in a total addition (subtraction) of @wg to the
3007  * total group weight.
3008  *
3009  * Given a runqueue weight distribution (rw_i) we can compute a shares
3010  * distribution (s_i) using:
3011  *
3012  *   s_i = rw_i / \Sum rw_j						(1)
3013  *
3014  * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
3015  * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
3016  * shares distribution (s_i):
3017  *
3018  *   rw_i = {   2,   4,   1,   0 }
3019  *   s_i  = { 2/7, 4/7, 1/7,   0 }
3020  *
3021  * As per wake_affine() we're interested in the load of two CPUs (the CPU the
3022  * task used to run on and the CPU the waker is running on), we need to
3023  * compute the effect of waking a task on either CPU and, in case of a sync
3024  * wakeup, compute the effect of the current task going to sleep.
3025  *
3026  * So for a change of @wl to the local @cpu with an overall group weight change
3027  * of @wl we can compute the new shares distribution (s'_i) using:
3028  *
3029  *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2)
3030  *
3031  * Suppose we're interested in CPUs 0 and 1, and want to compute the load
3032  * differences in waking a task to CPU 0. The additional task changes the
3033  * weight and shares distributions like:
3034  *
3035  *   rw'_i = {   3,   4,   1,   0 }
3036  *   s'_i  = { 3/8, 4/8, 1/8,   0 }
3037  *
3038  * We can then compute the difference in effective weight by using:
3039  *
3040  *   dw_i = S * (s'_i - s_i)						(3)
3041  *
3042  * Where 'S' is the group weight as seen by its parent.
3043  *
3044  * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
3045  * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
3046  * 4/7) times the weight of the group.
3047  */
3048 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
3049 {
3050 	struct sched_entity *se = tg->se[cpu];
3051 
3052 	if (!tg->parent)	/* the trivial, non-cgroup case */
3053 		return wl;
3054 
3055 	for_each_sched_entity(se) {
3056 		long w, W;
3057 
3058 		tg = se->my_q->tg;
3059 
3060 		/*
3061 		 * W = @wg + \Sum rw_j
3062 		 */
3063 		W = wg + calc_tg_weight(tg, se->my_q);
3064 
3065 		/*
3066 		 * w = rw_i + @wl
3067 		 */
3068 		w = se->my_q->load.weight + wl;
3069 
3070 		/*
3071 		 * wl = S * s'_i; see (2)
3072 		 */
3073 		if (W > 0 && w < W)
3074 			wl = (w * tg->shares) / W;
3075 		else
3076 			wl = tg->shares;
3077 
3078 		/*
3079 		 * Per the above, wl is the new se->load.weight value; since
3080 		 * those are clipped to [MIN_SHARES, ...) do so now. See
3081 		 * calc_cfs_shares().
3082 		 */
3083 		if (wl < MIN_SHARES)
3084 			wl = MIN_SHARES;
3085 
3086 		/*
3087 		 * wl = dw_i = S * (s'_i - s_i); see (3)
3088 		 */
3089 		wl -= se->load.weight;
3090 
3091 		/*
3092 		 * Recursively apply this logic to all parent groups to compute
3093 		 * the final effective load change on the root group. Since
3094 		 * only the @tg group gets extra weight, all parent groups can
3095 		 * only redistribute existing shares. @wl is the shift in shares
3096 		 * resulting from this level per the above.
3097 		 */
3098 		wg = 0;
3099 	}
3100 
3101 	return wl;
3102 }
3103 #else
3104 
3105 static inline unsigned long effective_load(struct task_group *tg, int cpu,
3106 		unsigned long wl, unsigned long wg)
3107 {
3108 	return wl;
3109 }
3110 
3111 #endif
3112 
3113 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
3114 {
3115 	s64 this_load, load;
3116 	int idx, this_cpu, prev_cpu;
3117 	unsigned long tl_per_task;
3118 	struct task_group *tg;
3119 	unsigned long weight;
3120 	int balanced;
3121 
3122 	idx	  = sd->wake_idx;
3123 	this_cpu  = smp_processor_id();
3124 	prev_cpu  = task_cpu(p);
3125 	load	  = source_load(prev_cpu, idx);
3126 	this_load = target_load(this_cpu, idx);
3127 
3128 	/*
3129 	 * If sync wakeup then subtract the (maximum possible)
3130 	 * effect of the currently running task from the load
3131 	 * of the current CPU:
3132 	 */
3133 	if (sync) {
3134 		tg = task_group(current);
3135 		weight = current->se.load.weight;
3136 
3137 		this_load += effective_load(tg, this_cpu, -weight, -weight);
3138 		load += effective_load(tg, prev_cpu, 0, -weight);
3139 	}
3140 
3141 	tg = task_group(p);
3142 	weight = p->se.load.weight;
3143 
3144 	/*
3145 	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
3146 	 * due to the sync cause above having dropped this_load to 0, we'll
3147 	 * always have an imbalance, but there's really nothing you can do
3148 	 * about that, so that's good too.
3149 	 *
3150 	 * Otherwise check if either cpus are near enough in load to allow this
3151 	 * task to be woken on this_cpu.
3152 	 */
3153 	if (this_load > 0) {
3154 		s64 this_eff_load, prev_eff_load;
3155 
3156 		this_eff_load = 100;
3157 		this_eff_load *= power_of(prev_cpu);
3158 		this_eff_load *= this_load +
3159 			effective_load(tg, this_cpu, weight, weight);
3160 
3161 		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
3162 		prev_eff_load *= power_of(this_cpu);
3163 		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
3164 
3165 		balanced = this_eff_load <= prev_eff_load;
3166 	} else
3167 		balanced = true;
3168 
3169 	/*
3170 	 * If the currently running task will sleep within
3171 	 * a reasonable amount of time then attract this newly
3172 	 * woken task:
3173 	 */
3174 	if (sync && balanced)
3175 		return 1;
3176 
3177 	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
3178 	tl_per_task = cpu_avg_load_per_task(this_cpu);
3179 
3180 	if (balanced ||
3181 	    (this_load <= load &&
3182 	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
3183 		/*
3184 		 * This domain has SD_WAKE_AFFINE and
3185 		 * p is cache cold in this domain, and
3186 		 * there is no bad imbalance.
3187 		 */
3188 		schedstat_inc(sd, ttwu_move_affine);
3189 		schedstat_inc(p, se.statistics.nr_wakeups_affine);
3190 
3191 		return 1;
3192 	}
3193 	return 0;
3194 }
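/*
 * Rough numbers (assuming equal cpu_power and a typical imbalance_pct of
 * 125): prev_eff_load carries a 100 + (125 - 100)/2 = 112 factor against
 * 100 for this_eff_load, so the affine wakeup onto this_cpu is considered
 * "balanced" as long as this_cpu's effective load stays within roughly 12%
 * of prev_cpu's.  The cpu_power terms are cross-multiplied, making a
 * weaker waking cpu proportionally harder to justify as the target.
 */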
3195 
3196 /*
3197  * find_idlest_group finds and returns the least busy CPU group within the
3198  * domain.
3199  */
3200 static struct sched_group *
3201 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
3202 		  int this_cpu, int load_idx)
3203 {
3204 	struct sched_group *idlest = NULL, *group = sd->groups;
3205 	unsigned long min_load = ULONG_MAX, this_load = 0;
3206 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
3207 
3208 	do {
3209 		unsigned long load, avg_load;
3210 		int local_group;
3211 		int i;
3212 
3213 		/* Skip over this group if it has no CPUs allowed */
3214 		if (!cpumask_intersects(sched_group_cpus(group),
3215 					tsk_cpus_allowed(p)))
3216 			continue;
3217 
3218 		local_group = cpumask_test_cpu(this_cpu,
3219 					       sched_group_cpus(group));
3220 
3221 		/* Tally up the load of all CPUs in the group */
3222 		avg_load = 0;
3223 
3224 		for_each_cpu(i, sched_group_cpus(group)) {
3225 			/* Bias balancing toward cpus of our domain */
3226 			if (local_group)
3227 				load = source_load(i, load_idx);
3228 			else
3229 				load = target_load(i, load_idx);
3230 
3231 			avg_load += load;
3232 		}
3233 
3234 		/* Adjust by relative CPU power of the group */
3235 		avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
3236 
3237 		if (local_group) {
3238 			this_load = avg_load;
3239 		} else if (avg_load < min_load) {
3240 			min_load = avg_load;
3241 			idlest = group;
3242 		}
3243 	} while (group = group->next, group != sd->groups);
3244 
3245 	if (!idlest || 100*this_load < imbalance*min_load)
3246 		return NULL;
3247 	return idlest;
3248 }
3249 
3250 /*
3251  * find_idlest_cpu - find the idlest cpu among the cpus in group.
3252  */
3253 static int
3254 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
3255 {
3256 	unsigned long load, min_load = ULONG_MAX;
3257 	int idlest = -1;
3258 	int i;
3259 
3260 	/* Traverse only the allowed CPUs */
3261 	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
3262 		load = weighted_cpuload(i);
3263 
3264 		if (load < min_load || (load == min_load && i == this_cpu)) {
3265 			min_load = load;
3266 			idlest = i;
3267 		}
3268 	}
3269 
3270 	return idlest;
3271 }
3272 
3273 /*
3274  * Try and locate an idle CPU in the sched_domain.
3275  */
3276 static int select_idle_sibling(struct task_struct *p, int target)
3277 {
3278 	struct sched_domain *sd;
3279 	struct sched_group *sg;
3280 	int i = task_cpu(p);
3281 
3282 	if (idle_cpu(target))
3283 		return target;
3284 
3285 	/*
3286 	 * If the previous cpu is cache affine and idle, don't be stupid.
3287 	 */
3288 	if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
3289 		return i;
3290 
3291 	/*
3292 	 * Otherwise, iterate the domains and find an eligible idle cpu.
3293 	 */
3294 	sd = rcu_dereference(per_cpu(sd_llc, target));
3295 	for_each_lower_domain(sd) {
3296 		sg = sd->groups;
3297 		do {
3298 			if (!cpumask_intersects(sched_group_cpus(sg),
3299 						tsk_cpus_allowed(p)))
3300 				goto next;
3301 
3302 			for_each_cpu(i, sched_group_cpus(sg)) {
3303 				if (i == target || !idle_cpu(i))
3304 					goto next;
3305 			}
3306 
3307 			target = cpumask_first_and(sched_group_cpus(sg),
3308 					tsk_cpus_allowed(p));
3309 			goto done;
3310 next:
3311 			sg = sg->next;
3312 		} while (sg != sd->groups);
3313 	}
3314 done:
3315 	return target;
3316 }
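/*
 * The scan above is ordered cheapest-first: take @target if it is already
 * idle, then the task's previous cpu when it shares a cache with @target
 * and is idle, and only then walk the LLC domain's groups looking for one
 * whose cpus are *all* idle -- on an SMT machine this amounts to searching
 * for a fully idle core rather than a lone idle sibling.
 */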
3317 
3318 /*
3319  * select_task_rq_fair: balance the current task (running on cpu) in domains
3320  * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
3321  * SD_BALANCE_EXEC.
3322  *
3323  * Balance, ie. select the least loaded group.
3324  *
3325  * Returns the target CPU number, or the same CPU if no balancing is needed.
3326  *
3327  * preempt must be disabled.
3328  */
3329 static int
3330 select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
3331 {
3332 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
3333 	int cpu = smp_processor_id();
3334 	int prev_cpu = task_cpu(p);
3335 	int new_cpu = cpu;
3336 	int want_affine = 0;
3337 	int sync = wake_flags & WF_SYNC;
3338 
3339 	if (p->nr_cpus_allowed == 1)
3340 		return prev_cpu;
3341 
3342 	if (sd_flag & SD_BALANCE_WAKE) {
3343 		if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
3344 			want_affine = 1;
3345 		new_cpu = prev_cpu;
3346 	}
3347 
3348 	rcu_read_lock();
3349 	for_each_domain(cpu, tmp) {
3350 		if (!(tmp->flags & SD_LOAD_BALANCE))
3351 			continue;
3352 
3353 		/*
3354 		 * If both cpu and prev_cpu are part of this domain,
3355 		 * cpu is a valid SD_WAKE_AFFINE target.
3356 		 */
3357 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
3358 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
3359 			affine_sd = tmp;
3360 			break;
3361 		}
3362 
3363 		if (tmp->flags & sd_flag)
3364 			sd = tmp;
3365 	}
3366 
3367 	if (affine_sd) {
3368 		if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
3369 			prev_cpu = cpu;
3370 
3371 		new_cpu = select_idle_sibling(p, prev_cpu);
3372 		goto unlock;
3373 	}
3374 
3375 	while (sd) {
3376 		int load_idx = sd->forkexec_idx;
3377 		struct sched_group *group;
3378 		int weight;
3379 
3380 		if (!(sd->flags & sd_flag)) {
3381 			sd = sd->child;
3382 			continue;
3383 		}
3384 
3385 		if (sd_flag & SD_BALANCE_WAKE)
3386 			load_idx = sd->wake_idx;
3387 
3388 		group = find_idlest_group(sd, p, cpu, load_idx);
3389 		if (!group) {
3390 			sd = sd->child;
3391 			continue;
3392 		}
3393 
3394 		new_cpu = find_idlest_cpu(group, p, cpu);
3395 		if (new_cpu == -1 || new_cpu == cpu) {
3396 			/* Now try balancing at a lower domain level of cpu */
3397 			sd = sd->child;
3398 			continue;
3399 		}
3400 
3401 		/* Now try balancing at a lower domain level of new_cpu */
3402 		cpu = new_cpu;
3403 		weight = sd->span_weight;
3404 		sd = NULL;
3405 		for_each_domain(cpu, tmp) {
3406 			if (weight <= tmp->span_weight)
3407 				break;
3408 			if (tmp->flags & sd_flag)
3409 				sd = tmp;
3410 		}
3411 		/* while loop will break here if sd == NULL */
3412 	}
3413 unlock:
3414 	rcu_read_unlock();
3415 
3416 	return new_cpu;
3417 }
3418 
3419 /*
3420  * Load tracking only depends on SMP; the FAIR_GROUP_SCHED dependency below
3421  * may be removed once load tracking is useful for purposes beyond shares
3422  * distribution (e.g. load-balance).
3423  */
3424 #ifdef CONFIG_FAIR_GROUP_SCHED
3425 /*
3426  * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
3427  * cfs_rq_of(p) references at time of call are still valid and identify the
3428  * previous cpu.  However, the caller only guarantees p->pi_lock is held; no
3429  * other assumptions, including the state of rq->lock, should be made.
3430  */
3431 static void
3432 migrate_task_rq_fair(struct task_struct *p, int next_cpu)
3433 {
3434 	struct sched_entity *se = &p->se;
3435 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
3436 
3437 	/*
3438 	 * Load tracking: accumulate removed load so that it can be processed
3439 	 * when we next update owning cfs_rq under rq->lock.  Tasks contribute
3440 	 * to blocked load iff they have a positive decay-count.  It can never
3441 	 * be negative here since on-rq tasks have decay-count == 0.
3442 	 */
3443 	if (se->avg.decay_count) {
3444 		se->avg.decay_count = -__synchronize_entity_decay(se);
3445 		atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load);
3446 	}
3447 }
3448 #endif
3449 #endif /* CONFIG_SMP */
3450 
3451 static unsigned long
3452 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
3453 {
3454 	unsigned long gran = sysctl_sched_wakeup_granularity;
3455 
3456 	/*
3457 	 * Since it is curr that is running now, convert the gran from
3458 	 * real-time to virtual-time in its units.
3459 	 *
3460 	 * By using 'se' instead of 'curr' we penalize light tasks, so
3461 	 * they get preempted more easily. That is, if 'se' < 'curr' then
3462 	 * the resulting gran will be larger, therefore penalizing the
3463 	 * lighter task; if, on the other hand, 'se' > 'curr' then the
3464 	 * resulting gran will be smaller, again penalizing the lighter task.
3465 	 *
3466 	 * This is especially important for buddies when the leftmost
3467 	 * task is higher priority than the buddy.
3468 	 */
3469 	return calc_delta_fair(gran, se);
3470 }
3471 
3472 /*
3473  * Should 'se' preempt 'curr'.
3474  *
3475  *             |s1
3476  *        |s2
3477  *   |s3
3478  *         g
3479  *      |<--->|c
3480  *
3481  *  w(c, s1) = -1
3482  *  w(c, s2) =  0
3483  *  w(c, s3) =  1
3484  *
3485  */
3486 static int
3487 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
3488 {
3489 	s64 gran, vdiff = curr->vruntime - se->vruntime;
3490 
3491 	if (vdiff <= 0)
3492 		return -1;
3493 
3494 	gran = wakeup_gran(curr, se);
3495 	if (vdiff > gran)
3496 		return 1;
3497 
3498 	return 0;
3499 }
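/*
 * The decision above can be modelled with a few lines of plain userspace C.
 * The NICE_0 weight of 1024 and the simplified virtual-time scaling below are
 * assumptions for illustration only (the kernel does the conversion via
 * calc_delta_fair()); this is a sketch of the idea, not the scheduler's exact
 * arithmetic:
 *
 *	#include <stdio.h>
 *
 *	#define NICE_0_WEIGHT	1024LL
 *
 *	// A lighter 'se' stretches the granularity in virtual time.
 *	static long long vgran(long long gran_ns, long long se_weight)
 *	{
 *		return gran_ns * NICE_0_WEIGHT / se_weight;
 *	}
 *
 *	// Mirrors wakeup_preempt_entity(): -1/0 = don't preempt, 1 = preempt.
 *	static int should_preempt(long long curr_vrt, long long se_vrt,
 *				  long long gran_ns, long long se_weight)
 *	{
 *		long long vdiff = curr_vrt - se_vrt;
 *
 *		if (vdiff <= 0)
 *			return -1;
 *		if (vdiff > vgran(gran_ns, se_weight))
 *			return 1;
 *		return 0;
 *	}
 *
 *	int main(void)
 *	{
 *		// 1ms gran; the wakee is 1.5ms of vruntime behind curr: preempt.
 *		printf("%d\n", should_preempt(1500000, 0, 1000000, 1024));
 *		// Same lead, but the wakee weighs half of NICE_0, so the scaled
 *		// gran doubles to 2ms and the lead is no longer enough: 0.
 *		printf("%d\n", should_preempt(1500000, 0, 1000000, 512));
 *		return 0;
 *	}
 */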
3500 
3501 static void set_last_buddy(struct sched_entity *se)
3502 {
3503 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3504 		return;
3505 
3506 	for_each_sched_entity(se)
3507 		cfs_rq_of(se)->last = se;
3508 }
3509 
3510 static void set_next_buddy(struct sched_entity *se)
3511 {
3512 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3513 		return;
3514 
3515 	for_each_sched_entity(se)
3516 		cfs_rq_of(se)->next = se;
3517 }
3518 
3519 static void set_skip_buddy(struct sched_entity *se)
3520 {
3521 	for_each_sched_entity(se)
3522 		cfs_rq_of(se)->skip = se;
3523 }
3524 
3525 /*
3526  * Preempt the current task with a newly woken task if needed:
3527  */
3528 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
3529 {
3530 	struct task_struct *curr = rq->curr;
3531 	struct sched_entity *se = &curr->se, *pse = &p->se;
3532 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3533 	int scale = cfs_rq->nr_running >= sched_nr_latency;
3534 	int next_buddy_marked = 0;
3535 
3536 	if (unlikely(se == pse))
3537 		return;
3538 
3539 	/*
3540 	 * This is possible from callers such as move_task(), in which we
3541 	 * unconditionally call check_preempt_curr() after an enqueue (which may
3542 	 * have led to a throttle).  This both saves work and prevents false
3543 	 * next-buddy nomination below.
3544 	 */
3545 	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
3546 		return;
3547 
3548 	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
3549 		set_next_buddy(pse);
3550 		next_buddy_marked = 1;
3551 	}
3552 
3553 	/*
3554 	 * We can come here with TIF_NEED_RESCHED already set from new task
3555 	 * wake up path.
3556 	 *
3557 	 * Note: this also catches the edge-case of curr being in a throttled
3558 	 * group (e.g. via set_curr_task), since update_curr() (in the
3559 	 * enqueue of curr) will have resulted in resched being set.  This
3560 	 * prevents us from potentially nominating it as a false LAST_BUDDY
3561 	 * below.
3562 	 */
3563 	if (test_tsk_need_resched(curr))
3564 		return;
3565 
3566 	/* Idle tasks are by definition preempted by non-idle tasks. */
3567 	if (unlikely(curr->policy == SCHED_IDLE) &&
3568 	    likely(p->policy != SCHED_IDLE))
3569 		goto preempt;
3570 
3571 	/*
3572 	 * Batch and idle tasks do not preempt non-idle tasks (their preemption
3573 	 * is driven by the tick):
3574 	 */
3575 	if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
3576 		return;
3577 
3578 	find_matching_se(&se, &pse);
3579 	update_curr(cfs_rq_of(se));
3580 	BUG_ON(!pse);
3581 	if (wakeup_preempt_entity(se, pse) == 1) {
3582 		/*
3583 		 * Bias pick_next to pick the sched entity that is
3584 		 * triggering this preemption.
3585 		 */
3586 		if (!next_buddy_marked)
3587 			set_next_buddy(pse);
3588 		goto preempt;
3589 	}
3590 
3591 	return;
3592 
3593 preempt:
3594 	resched_task(curr);
3595 	/*
3596 	 * Only set the backward buddy when the current task is still
3597 	 * on the rq. This can happen when a wakeup gets interleaved
3598 	 * with schedule on the ->pre_schedule() or idle_balance()
3599 	 * point, either of which can drop the rq lock.
3600 	 *
3601 	 * Also, during early boot the idle thread is in the fair class,
3602 	 * for obvious reasons it's a bad idea to schedule back to it.
3603 	 */
3604 	if (unlikely(!se->on_rq || curr == rq->idle))
3605 		return;
3606 
3607 	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
3608 		set_last_buddy(se);
3609 }
3610 
3611 static struct task_struct *pick_next_task_fair(struct rq *rq)
3612 {
3613 	struct task_struct *p;
3614 	struct cfs_rq *cfs_rq = &rq->cfs;
3615 	struct sched_entity *se;
3616 
3617 	if (!cfs_rq->nr_running)
3618 		return NULL;
3619 
3620 	do {
3621 		se = pick_next_entity(cfs_rq);
3622 		set_next_entity(cfs_rq, se);
3623 		cfs_rq = group_cfs_rq(se);
3624 	} while (cfs_rq);
3625 
3626 	p = task_of(se);
3627 	if (hrtick_enabled(rq))
3628 		hrtick_start_fair(rq, p);
3629 
3630 	return p;
3631 }
3632 
3633 /*
3634  * Account for a descheduled task:
3635  */
3636 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
3637 {
3638 	struct sched_entity *se = &prev->se;
3639 	struct cfs_rq *cfs_rq;
3640 
3641 	for_each_sched_entity(se) {
3642 		cfs_rq = cfs_rq_of(se);
3643 		put_prev_entity(cfs_rq, se);
3644 	}
3645 }
3646 
3647 /*
3648  * sched_yield() is very simple
3649  *
3650  * The magic of dealing with the ->skip buddy is in pick_next_entity.
3651  */
3652 static void yield_task_fair(struct rq *rq)
3653 {
3654 	struct task_struct *curr = rq->curr;
3655 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3656 	struct sched_entity *se = &curr->se;
3657 
3658 	/*
3659 	 * Are we the only task in the tree?
3660 	 */
3661 	if (unlikely(rq->nr_running == 1))
3662 		return;
3663 
3664 	clear_buddies(cfs_rq, se);
3665 
3666 	if (curr->policy != SCHED_BATCH) {
3667 		update_rq_clock(rq);
3668 		/*
3669 		 * Update run-time statistics of the 'current'.
3670 		 */
3671 		update_curr(cfs_rq);
3672 		/*
3673 		 * Tell update_rq_clock() that we've just updated,
3674 		 * so we don't do microscopic update in schedule()
3675 		 * and double the fastpath cost.
3676 		 */
3677 		 rq->skip_clock_update = 1;
3678 	}
3679 
3680 	set_skip_buddy(se);
3681 }
3682 
3683 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
3684 {
3685 	struct sched_entity *se = &p->se;
3686 
3687 	/* throttled hierarchies are not runnable */
3688 	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
3689 		return false;
3690 
3691 	/* Tell the scheduler that we'd really like p's se to run next. */
3692 	set_next_buddy(se);
3693 
3694 	yield_task_fair(rq);
3695 
3696 	return true;
3697 }
3698 
3699 #ifdef CONFIG_SMP
3700 /**************************************************
3701  * Fair scheduling class load-balancing methods.
3702  *
3703  * BASICS
3704  *
3705  * The purpose of load-balancing is to achieve the same basic fairness the
3706  * per-cpu scheduler provides, namely provide a proportional amount of compute
3707  * time to each task. This is expressed in the following equation:
3708  *
3709  *   W_i,n/P_i == W_j,n/P_j for all i,j                               (1)
3710  *
3711  * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
3712  * W_i,0 is defined as:
3713  *
3714  *   W_i,0 = \Sum_j w_i,j                                             (2)
3715  *
3716  * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
3717  * is derived from the nice value as per prio_to_weight[].
3718  *
3719  * The weight average is an exponential decay average of the instantaneous
3720  * weight:
3721  *
3722  *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
3723  *
3724  * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
3725  * fraction of 'recent' time available for SCHED_OTHER task execution. But it
3726  * can also include other factors [XXX].
3727  *
3728  * To achieve this balance we define a measure of imbalance which follows
3729  * directly from (1):
3730  *
3731  *   imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j }    (4)
3732  *
3733  * We then move tasks around to minimize the imbalance. In the continuous
3734  * function space it is obvious this converges, in the discrete case we get
3735  * a few fun cases generally called infeasible weight scenarios.
3736  *
3737  * [XXX expand on:
3738  *     - infeasible weights;
3739  *     - local vs global optima in the discrete case. ]
3740  *
3741  *
3742  * SCHED DOMAINS
3743  *
3744  * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
3745  * for all i,j solution, we create a tree of cpus that follows the hardware
3746  * topology where each level pairs two lower groups (or better). This results
3747  * in O(log n) layers. Furthermore we reduce the number of cpus going up the
3748  * tree to only the first of the previous level and we decrease the frequency
3749  * of load-balance at each level inv. proportional to the number of cpus in
3750  * the groups.
3751  *
3752  * This yields:
3753  *
3754  *     log_2 n     1     n
3755  *   \Sum       { --- * --- * 2^i } = O(n)                            (5)
3756  *     i = 0      2^i   2^i
3757  *                               `- size of each group
3758  *         |         |     `- number of cpus doing load-balance
3759  *         |         `- freq
3760  *         `- sum over all levels
3761  *
3762  * Coupled with a limit on how many tasks we can migrate every balance pass,
3763  * this makes (5) the runtime complexity of the balancer.
3764  *
3765  * An important property here is that each CPU is still (indirectly) connected
3766  * to every other cpu in at most O(log n) steps:
3767  *
3768  * The adjacency matrix of the resulting graph is given by:
3769  *
3770  *             log_2 n
3771  *   A_i,j = \Union     (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)  (6)
3772  *             k = 0
3773  *
3774  * And you'll find that:
3775  *
3776  *   A^(log_2 n)_i,j != 0  for all i,j                                (7)
3777  *
3778  * Showing there's indeed a path between every cpu in at most O(log n) steps.
3779  * The task movement gives a factor of O(m), giving a convergence complexity
3780  * of:
3781  *
3782  *   O(nm log n),  n := nr_cpus, m := nr_tasks                        (8)
3783  *
3784  *
3785  * WORK CONSERVING
3786  *
3787  * In order to avoid CPUs going idle while there's still work to do, new idle
3788  * balancing is more aggressive and has the newly idle cpu iterate up the domain
3789  * tree itself instead of relying on other CPUs to bring it work.
3790  *
3791  * This adds some complexity to both (5) and (8) but it reduces the total idle
3792  * time.
3793  *
3794  * [XXX more?]
3795  *
3796  *
3797  * CGROUPS
3798  *
3799  * Cgroups make a horror show out of (2), instead of a simple sum we get:
3800  *
3801  *                                s_k,i
3802  *   W_i,0 = \Sum_j \Prod_k w_k * -----                               (9)
3803  *                                 S_k
3804  *
3805  * Where
3806  *
3807  *   s_k,i = \Sum_j w_i,j,k  and  S_k = \Sum_i s_k,i                 (10)
3808  *
3809  * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
3810  *
3811  * The big problem is S_k, it's a global sum needed to compute a local (W_i)
3812  * property.
3813  *
3814  * [XXX write more on how we solve this.. _after_ merging pjt's patches that
3815  *      rewrite all of this once again.]
3816  */
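/*
 * A small worked example of (3) and (4), with made-up numbers purely for
 * illustration (the real arithmetic is fixed-point and lives in the load
 * tracking code and in find_busiest_group() below):
 *
 *   (3) with n = 2, a previous average W_i,n = 2048 and an instantaneous
 *       weight W_i,0 = 1024:
 *
 *         W'_i,n = 3/4 * 2048 + 1/4 * 1024 = 1536 + 256 = 1792
 *
 *   (4) with two cpus of equal power P = 1024 and weights W_1 = 3072,
 *       W_2 = 1024: W_1/P_1 = 3, W_2/P_2 = 1 and avg(W/P) = 2, hence
 *
 *         imb_1,2 = max{2, 3} - min{2, 1} = 2
 *
 *       Moving one unit of W/P from cpu 1 to cpu 2 puts both on the average
 *       and drives the measure to 0, which is what the balancer converges
 *       towards.
 */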
3817 
3818 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
3819 
3820 #define LBF_ALL_PINNED	0x01
3821 #define LBF_NEED_BREAK	0x02
3822 #define LBF_SOME_PINNED 0x04
3823 
3824 struct lb_env {
3825 	struct sched_domain	*sd;
3826 
3827 	struct rq		*src_rq;
3828 	int			src_cpu;
3829 
3830 	int			dst_cpu;
3831 	struct rq		*dst_rq;
3832 
3833 	struct cpumask		*dst_grpmask;
3834 	int			new_dst_cpu;
3835 	enum cpu_idle_type	idle;
3836 	long			imbalance;
3837 	/* The set of CPUs under consideration for load-balancing */
3838 	struct cpumask		*cpus;
3839 
3840 	unsigned int		flags;
3841 
3842 	unsigned int		loop;
3843 	unsigned int		loop_break;
3844 	unsigned int		loop_max;
3845 };
3846 
3847 /*
3848  * move_task - move a task from one runqueue to another runqueue.
3849  * Both runqueues must be locked.
3850  */
3851 static void move_task(struct task_struct *p, struct lb_env *env)
3852 {
3853 	deactivate_task(env->src_rq, p, 0);
3854 	set_task_cpu(p, env->dst_cpu);
3855 	activate_task(env->dst_rq, p, 0);
3856 	check_preempt_curr(env->dst_rq, p, 0);
3857 }
3858 
3859 /*
3860  * Is this task likely cache-hot:
3861  */
3862 static int
3863 task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
3864 {
3865 	s64 delta;
3866 
3867 	if (p->sched_class != &fair_sched_class)
3868 		return 0;
3869 
3870 	if (unlikely(p->policy == SCHED_IDLE))
3871 		return 0;
3872 
3873 	/*
3874 	 * Buddy candidates are cache hot:
3875 	 */
3876 	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
3877 			(&p->se == cfs_rq_of(&p->se)->next ||
3878 			 &p->se == cfs_rq_of(&p->se)->last))
3879 		return 1;
3880 
3881 	if (sysctl_sched_migration_cost == -1)
3882 		return 1;
3883 	if (sysctl_sched_migration_cost == 0)
3884 		return 0;
3885 
3886 	delta = now - p->se.exec_start;
3887 
3888 	return delta < (s64)sysctl_sched_migration_cost;
3889 }
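/*
 * Example with an assumed migration cost of 500us: a fair task whose
 * exec_start was updated 200us ago (delta = 200000ns) is still considered
 * cache hot and task_hot() returns 1, while one that last ran 2ms ago is
 * cold and becomes a cheap candidate for can_migrate_task() below.
 */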
3890 
3891 /*
3892  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3893  */
3894 static
3895 int can_migrate_task(struct task_struct *p, struct lb_env *env)
3896 {
3897 	int tsk_cache_hot = 0;
3898 	/*
3899 	 * We do not migrate tasks that are:
3900 	 * 1) throttled_lb_pair, or
3901 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
3902 	 * 3) running (obviously), or
3903 	 * 4) are cache-hot on their current CPU.
3904 	 */
3905 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
3906 		return 0;
3907 
3908 	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
3909 		int cpu;
3910 
3911 		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
3912 
3913 		/*
3914 		 * Remember if this task can be migrated to any other cpu in
3915 		 * our sched_group. We may want to revisit it if we couldn't
3916 		 * meet load balance goals by pulling other tasks on src_cpu.
3917 		 *
3918 		 * Also avoid computing new_dst_cpu if we have already computed
3919 		 * one in current iteration.
3920 		 */
3921 		if (!env->dst_grpmask || (env->flags & LBF_SOME_PINNED))
3922 			return 0;
3923 
3924 		/* Prevent re-selecting dst_cpu via env's cpus */
3925 		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
3926 			if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
3927 				env->flags |= LBF_SOME_PINNED;
3928 				env->new_dst_cpu = cpu;
3929 				break;
3930 			}
3931 		}
3932 
3933 		return 0;
3934 	}
3935 
3936 	/* Record that we found at least one task that could run on dst_cpu */
3937 	env->flags &= ~LBF_ALL_PINNED;
3938 
3939 	if (task_running(env->src_rq, p)) {
3940 		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
3941 		return 0;
3942 	}
3943 
3944 	/*
3945 	 * Aggressive migration if:
3946 	 * 1) task is cache cold, or
3947 	 * 2) too many balance attempts have failed.
3948 	 */
3949 
3950 	tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd);
3951 	if (!tsk_cache_hot ||
3952 		env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
3953 
3954 		if (tsk_cache_hot) {
3955 			schedstat_inc(env->sd, lb_hot_gained[env->idle]);
3956 			schedstat_inc(p, se.statistics.nr_forced_migrations);
3957 		}
3958 
3959 		return 1;
3960 	}
3961 
3962 	schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
3963 	return 0;
3964 }
3965 
3966 /*
3967  * move_one_task tries to move exactly one task from busiest to this_rq, as
3968  * part of active balancing operations within "domain".
3969  * Returns 1 if successful and 0 otherwise.
3970  *
3971  * Called with both runqueues locked.
3972  */
3973 static int move_one_task(struct lb_env *env)
3974 {
3975 	struct task_struct *p, *n;
3976 
3977 	list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
3978 		if (!can_migrate_task(p, env))
3979 			continue;
3980 
3981 		move_task(p, env);
3982 		/*
3983 		 * Right now, this is only the second place move_task()
3984 		 * is called, so we can safely collect move_task()
3985 		 * stats here rather than inside move_task().
3986 		 */
3987 		schedstat_inc(env->sd, lb_gained[env->idle]);
3988 		return 1;
3989 	}
3990 	return 0;
3991 }
3992 
3993 static unsigned long task_h_load(struct task_struct *p);
3994 
3995 static const unsigned int sched_nr_migrate_break = 32;
3996 
3997 /*
3998  * move_tasks tries to move up to imbalance weighted load from busiest to
3999  * this_rq, as part of a balancing operation within domain "sd".
4000  * Returns 1 if successful and 0 otherwise.
4001  *
4002  * Called with both runqueues locked.
4003  */
4004 static int move_tasks(struct lb_env *env)
4005 {
4006 	struct list_head *tasks = &env->src_rq->cfs_tasks;
4007 	struct task_struct *p;
4008 	unsigned long load;
4009 	int pulled = 0;
4010 
4011 	if (env->imbalance <= 0)
4012 		return 0;
4013 
4014 	while (!list_empty(tasks)) {
4015 		p = list_first_entry(tasks, struct task_struct, se.group_node);
4016 
4017 		env->loop++;
4018 		/* We've more or less seen every task there is, call it quits */
4019 		if (env->loop > env->loop_max)
4020 			break;
4021 
4022 		/* take a breather every nr_migrate tasks */
4023 		if (env->loop > env->loop_break) {
4024 			env->loop_break += sched_nr_migrate_break;
4025 			env->flags |= LBF_NEED_BREAK;
4026 			break;
4027 		}
4028 
4029 		if (!can_migrate_task(p, env))
4030 			goto next;
4031 
4032 		load = task_h_load(p);
4033 
4034 		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
4035 			goto next;
4036 
4037 		if ((load / 2) > env->imbalance)
4038 			goto next;
4039 
4040 		move_task(p, env);
4041 		pulled++;
4042 		env->imbalance -= load;
4043 
4044 #ifdef CONFIG_PREEMPT
4045 		/*
4046 		 * NEWIDLE balancing is a source of latency, so preemptible
4047 		 * kernels will stop after the first task is pulled to minimize
4048 		 * the critical section.
4049 		 */
4050 		if (env->idle == CPU_NEWLY_IDLE)
4051 			break;
4052 #endif
4053 
4054 		/*
4055 		 * We only want to steal up to the prescribed amount of
4056 		 * weighted load.
4057 		 */
4058 		if (env->imbalance <= 0)
4059 			break;
4060 
4061 		continue;
4062 next:
4063 		list_move_tail(&p->se.group_node, tasks);
4064 	}
4065 
4066 	/*
4067 	 * Right now, this is one of only two places move_task() is called,
4068 	 * so we can safely collect move_task() stats here rather than
4069 	 * inside move_task().
4070 	 */
4071 	schedstat_add(env->sd, lb_gained[env->idle], pulled);
4072 
4073 	return pulled;
4074 }
4075 
4076 #ifdef CONFIG_FAIR_GROUP_SCHED
4077 /*
4078  * update tg->load_weight by folding this cpu's load_avg
4079  */
4080 static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
4081 {
4082 	struct sched_entity *se = tg->se[cpu];
4083 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
4084 
4085 	/* throttled entities do not contribute to load */
4086 	if (throttled_hierarchy(cfs_rq))
4087 		return;
4088 
4089 	update_cfs_rq_blocked_load(cfs_rq, 1);
4090 
4091 	if (se) {
4092 		update_entity_load_avg(se, 1);
4093 		/*
4094 		 * We pivot on our runnable average having decayed to zero for
4095 		 * list removal.  This generally implies that all our children
4096 		 * have also been removed (modulo rounding error or bandwidth
4097 		 * control); however, such cases are rare and we can fix these
4098 		 * at enqueue.
4099 		 *
4100 		 * TODO: fix up out-of-order children on enqueue.
4101 		 */
4102 		if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
4103 			list_del_leaf_cfs_rq(cfs_rq);
4104 	} else {
4105 		struct rq *rq = rq_of(cfs_rq);
4106 		update_rq_runnable_avg(rq, rq->nr_running);
4107 	}
4108 }
4109 
4110 static void update_blocked_averages(int cpu)
4111 {
4112 	struct rq *rq = cpu_rq(cpu);
4113 	struct cfs_rq *cfs_rq;
4114 	unsigned long flags;
4115 
4116 	raw_spin_lock_irqsave(&rq->lock, flags);
4117 	update_rq_clock(rq);
4118 	/*
4119 	 * Iterates the task_group tree in a bottom up fashion, see
4120 	 * list_add_leaf_cfs_rq() for details.
4121 	 */
4122 	for_each_leaf_cfs_rq(rq, cfs_rq) {
4123 		/*
4124 		 * Note: We may want to consider periodically releasing
4125 		 * rq->lock around these updates so that creating many task
4126 		 * groups does not result in continually extending hold time.
4127 		 */
4128 		__update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
4129 	}
4130 
4131 	raw_spin_unlock_irqrestore(&rq->lock, flags);
4132 }
4133 
4134 /*
4135  * Compute the cpu's hierarchical load factor for each task group.
4136  * This needs to be done in a top-down fashion because the load of a child
4137  * group is a fraction of its parents load.
4138  */
4139 static int tg_load_down(struct task_group *tg, void *data)
4140 {
4141 	unsigned long load;
4142 	long cpu = (long)data;
4143 
4144 	if (!tg->parent) {
4145 		load = cpu_rq(cpu)->load.weight;
4146 	} else {
4147 		load = tg->parent->cfs_rq[cpu]->h_load;
4148 		load *= tg->se[cpu]->load.weight;
4149 		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
4150 	}
4151 
4152 	tg->cfs_rq[cpu]->h_load = load;
4153 
4154 	return 0;
4155 }
4156 
4157 static void update_h_load(long cpu)
4158 {
4159 	struct rq *rq = cpu_rq(cpu);
4160 	unsigned long now = jiffies;
4161 
4162 	if (rq->h_load_throttle == now)
4163 		return;
4164 
4165 	rq->h_load_throttle = now;
4166 
4167 	rcu_read_lock();
4168 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
4169 	rcu_read_unlock();
4170 }
4171 
4172 static unsigned long task_h_load(struct task_struct *p)
4173 {
4174 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
4175 	unsigned long load;
4176 
4177 	load = p->se.load.weight;
4178 	load = div_u64(load * cfs_rq->h_load, cfs_rq->load.weight + 1);
4179 
4180 	return load;
4181 }
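/*
 * Worked example with made-up numbers: a runqueue with load.weight = 3072,
 * of which a group's sched_entity contributes 1024, gives that group's
 * cfs_rq (via tg_load_down()):
 *
 *   h_load = 3072 * 1024 / (3072 + 1) ~= 1023
 *
 * A task of weight 512 on that cfs_rq, whose total load.weight is 2048,
 * then reports:
 *
 *   task_h_load(p) = 512 * 1023 / (2048 + 1) ~= 255
 *
 * i.e. the task is charged with its share of the group's share of the cpu
 * load, and that is the quantity move_tasks() weighs against the imbalance.
 */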
4182 #else
4183 static inline void update_blocked_averages(int cpu)
4184 {
4185 }
4186 
4187 static inline void update_h_load(long cpu)
4188 {
4189 }
4190 
4191 static unsigned long task_h_load(struct task_struct *p)
4192 {
4193 	return p->se.load.weight;
4194 }
4195 #endif
4196 
4197 /********** Helpers for find_busiest_group ************************/
4198 /*
4199  * sd_lb_stats - Structure to store the statistics of a sched_domain
4200  * 		during load balancing.
4201  */
4202 struct sd_lb_stats {
4203 	struct sched_group *busiest; /* Busiest group in this sd */
4204 	struct sched_group *this;  /* Local group in this sd */
4205 	unsigned long total_load;  /* Total load of all groups in sd */
4206 	unsigned long total_pwr;   /*	Total power of all groups in sd */
4207 	unsigned long avg_load;	   /* Average load across all groups in sd */
4208 
4209 	/** Statistics of this group */
4210 	unsigned long this_load;
4211 	unsigned long this_load_per_task;
4212 	unsigned long this_nr_running;
4213 	unsigned long this_has_capacity;
4214 	unsigned int  this_idle_cpus;
4215 
4216 	/* Statistics of the busiest group */
4217 	unsigned int  busiest_idle_cpus;
4218 	unsigned long max_load;
4219 	unsigned long busiest_load_per_task;
4220 	unsigned long busiest_nr_running;
4221 	unsigned long busiest_group_capacity;
4222 	unsigned long busiest_has_capacity;
4223 	unsigned int  busiest_group_weight;
4224 
4225 	int group_imb; /* Is there imbalance in this sd */
4226 };
4227 
4228 /*
4229  * sg_lb_stats - stats of a sched_group required for load_balancing
4230  */
4231 struct sg_lb_stats {
4232 	unsigned long avg_load; /*Avg load across the CPUs of the group */
4233 	unsigned long group_load; /* Total load over the CPUs of the group */
4234 	unsigned long sum_nr_running; /* Nr tasks running in the group */
4235 	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
4236 	unsigned long group_capacity;
4237 	unsigned long idle_cpus;
4238 	unsigned long group_weight;
4239 	int group_imb; /* Is there an imbalance in the group ? */
4240 	int group_has_capacity; /* Is there extra capacity in the group? */
4241 };
4242 
4243 /**
4244  * get_sd_load_idx - Obtain the load index for a given sched domain.
4245  * @sd: The sched_domain whose load_idx is to be obtained.
4246  * @idle: The idle status of the CPU whose sd load_idx is obtained.
4247  */
4248 static inline int get_sd_load_idx(struct sched_domain *sd,
4249 					enum cpu_idle_type idle)
4250 {
4251 	int load_idx;
4252 
4253 	switch (idle) {
4254 	case CPU_NOT_IDLE:
4255 		load_idx = sd->busy_idx;
4256 		break;
4257 
4258 	case CPU_NEWLY_IDLE:
4259 		load_idx = sd->newidle_idx;
4260 		break;
4261 	default:
4262 		load_idx = sd->idle_idx;
4263 		break;
4264 	}
4265 
4266 	return load_idx;
4267 }
4268 
4269 static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
4270 {
4271 	return SCHED_POWER_SCALE;
4272 }
4273 
4274 unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
4275 {
4276 	return default_scale_freq_power(sd, cpu);
4277 }
4278 
4279 static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
4280 {
4281 	unsigned long weight = sd->span_weight;
4282 	unsigned long smt_gain = sd->smt_gain;
4283 
4284 	smt_gain /= weight;
4285 
4286 	return smt_gain;
4287 }
4288 
4289 unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
4290 {
4291 	return default_scale_smt_power(sd, cpu);
4292 }
4293 
4294 static unsigned long scale_rt_power(int cpu)
4295 {
4296 	struct rq *rq = cpu_rq(cpu);
4297 	u64 total, available, age_stamp, avg;
4298 
4299 	/*
4300 	 * Since we're reading these variables without serialization make sure
4301 	 * we read them once before doing sanity checks on them.
4302 	 */
4303 	age_stamp = ACCESS_ONCE(rq->age_stamp);
4304 	avg = ACCESS_ONCE(rq->rt_avg);
4305 
4306 	total = sched_avg_period() + (rq->clock - age_stamp);
4307 
4308 	if (unlikely(total < avg)) {
4309 		/* Ensures that power won't end up being negative */
4310 		available = 0;
4311 	} else {
4312 		available = total - avg;
4313 	}
4314 
4315 	if (unlikely((s64)total < SCHED_POWER_SCALE))
4316 		total = SCHED_POWER_SCALE;
4317 
4318 	total >>= SCHED_POWER_SHIFT;
4319 
4320 	return div_u64(available, total);
4321 }
4322 
4323 static void update_cpu_power(struct sched_domain *sd, int cpu)
4324 {
4325 	unsigned long weight = sd->span_weight;
4326 	unsigned long power = SCHED_POWER_SCALE;
4327 	struct sched_group *sdg = sd->groups;
4328 
4329 	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
4330 		if (sched_feat(ARCH_POWER))
4331 			power *= arch_scale_smt_power(sd, cpu);
4332 		else
4333 			power *= default_scale_smt_power(sd, cpu);
4334 
4335 		power >>= SCHED_POWER_SHIFT;
4336 	}
4337 
4338 	sdg->sgp->power_orig = power;
4339 
4340 	if (sched_feat(ARCH_POWER))
4341 		power *= arch_scale_freq_power(sd, cpu);
4342 	else
4343 		power *= default_scale_freq_power(sd, cpu);
4344 
4345 	power >>= SCHED_POWER_SHIFT;
4346 
4347 	power *= scale_rt_power(cpu);
4348 	power >>= SCHED_POWER_SHIFT;
4349 
4350 	if (!power)
4351 		power = 1;
4352 
4353 	cpu_rq(cpu)->cpu_power = power;
4354 	sdg->sgp->power = power;
4355 }
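/*
 * Illustrative numbers (the smt_gain value and the RT fraction are invented
 * for the example): on a 2-way SMT sibling domain with the default scale
 * functions, power starts at SCHED_POWER_SCALE (1024), is first scaled by
 * smt_gain / weight (say 1178 / 2 = 589), and is then reduced by
 * scale_rt_power(); if RT/irq time consumed ~25% of the recent period,
 * scale_rt_power() returns roughly 768/1024 of the scale, so:
 *
 *   power ~= 589 * 768 / 1024 ~= 441
 *
 * and this cpu advertises well under half of a "full" cpu to the balancer.
 */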
4356 
4357 void update_group_power(struct sched_domain *sd, int cpu)
4358 {
4359 	struct sched_domain *child = sd->child;
4360 	struct sched_group *group, *sdg = sd->groups;
4361 	unsigned long power;
4362 	unsigned long interval;
4363 
4364 	interval = msecs_to_jiffies(sd->balance_interval);
4365 	interval = clamp(interval, 1UL, max_load_balance_interval);
4366 	sdg->sgp->next_update = jiffies + interval;
4367 
4368 	if (!child) {
4369 		update_cpu_power(sd, cpu);
4370 		return;
4371 	}
4372 
4373 	power = 0;
4374 
4375 	if (child->flags & SD_OVERLAP) {
4376 		/*
4377 		 * SD_OVERLAP domains cannot assume that child groups
4378 		 * span the current group.
4379 		 */
4380 
4381 		for_each_cpu(cpu, sched_group_cpus(sdg))
4382 			power += power_of(cpu);
4383 	} else  {
4384 		/*
4385 		 * !SD_OVERLAP domains can assume that child groups
4386 		 * span the current group.
4387 		 */
4388 
4389 		group = child->groups;
4390 		do {
4391 			power += group->sgp->power;
4392 			group = group->next;
4393 		} while (group != child->groups);
4394 	}
4395 
4396 	sdg->sgp->power_orig = sdg->sgp->power = power;
4397 }
4398 
4399 /*
4400  * Try and fix up capacity for tiny siblings, this is needed when
4401  * things like SD_ASYM_PACKING need f_b_g to select another sibling
4402  * which on its own isn't powerful enough.
4403  *
4404  * See update_sd_pick_busiest() and check_asym_packing().
4405  */
4406 static inline int
4407 fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
4408 {
4409 	/*
4410 	 * Only siblings can have significantly less than SCHED_POWER_SCALE
4411 	 */
4412 	if (!(sd->flags & SD_SHARE_CPUPOWER))
4413 		return 0;
4414 
4415 	/*
4416 	 * If ~90% of the cpu_power is still there, we're good.
4417 	 */
4418 	if (group->sgp->power * 32 > group->sgp->power_orig * 29)
4419 		return 1;
4420 
4421 	return 0;
4422 }
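/*
 * Example of the 32/29 check above (29/32 ~= 90.6%): with power_orig = 589,
 * a remaining power of 540 passes (540 * 32 = 17280 > 589 * 29 = 17081) and
 * the sibling still counts as one unit of capacity, whereas 500 would not
 * (16000 < 17081), leaving the group's capacity at 0.
 */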
4423 
4424 /**
4425  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
4426  * @env: The load balancing environment.
4427  * @group: sched_group whose statistics are to be updated.
4428  * @load_idx: Load index of sched_domain of this_cpu for load calc.
4429  * @local_group: Does group contain this_cpu.
4430  * @balance: Should we balance.
4431  * @sgs: variable to hold the statistics for this group.
4432  */
4433 static inline void update_sg_lb_stats(struct lb_env *env,
4434 			struct sched_group *group, int load_idx,
4435 			int local_group, int *balance, struct sg_lb_stats *sgs)
4436 {
4437 	unsigned long nr_running, max_nr_running, min_nr_running;
4438 	unsigned long load, max_cpu_load, min_cpu_load;
4439 	unsigned int balance_cpu = -1, first_idle_cpu = 0;
4440 	unsigned long avg_load_per_task = 0;
4441 	int i;
4442 
4443 	if (local_group)
4444 		balance_cpu = group_balance_cpu(group);
4445 
4446 	/* Tally up the load of all CPUs in the group */
4447 	max_cpu_load = 0;
4448 	min_cpu_load = ~0UL;
4449 	max_nr_running = 0;
4450 	min_nr_running = ~0UL;
4451 
4452 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
4453 		struct rq *rq = cpu_rq(i);
4454 
4455 		nr_running = rq->nr_running;
4456 
4457 		/* Bias balancing toward cpus of our domain */
4458 		if (local_group) {
4459 			if (idle_cpu(i) && !first_idle_cpu &&
4460 					cpumask_test_cpu(i, sched_group_mask(group))) {
4461 				first_idle_cpu = 1;
4462 				balance_cpu = i;
4463 			}
4464 
4465 			load = target_load(i, load_idx);
4466 		} else {
4467 			load = source_load(i, load_idx);
4468 			if (load > max_cpu_load)
4469 				max_cpu_load = load;
4470 			if (min_cpu_load > load)
4471 				min_cpu_load = load;
4472 
4473 			if (nr_running > max_nr_running)
4474 				max_nr_running = nr_running;
4475 			if (min_nr_running > nr_running)
4476 				min_nr_running = nr_running;
4477 		}
4478 
4479 		sgs->group_load += load;
4480 		sgs->sum_nr_running += nr_running;
4481 		sgs->sum_weighted_load += weighted_cpuload(i);
4482 		if (idle_cpu(i))
4483 			sgs->idle_cpus++;
4484 	}
4485 
4486 	/*
4487 	 * First idle cpu or the first cpu(busiest) in this sched group
4488 	 * is eligible for doing load balancing at this and above
4489 	 * domains. In the newly idle case, we will allow all the cpu's
4490 	 * to do the newly idle load balance.
4491 	 */
4492 	if (local_group) {
4493 		if (env->idle != CPU_NEWLY_IDLE) {
4494 			if (balance_cpu != env->dst_cpu) {
4495 				*balance = 0;
4496 				return;
4497 			}
4498 			update_group_power(env->sd, env->dst_cpu);
4499 		} else if (time_after_eq(jiffies, group->sgp->next_update))
4500 			update_group_power(env->sd, env->dst_cpu);
4501 	}
4502 
4503 	/* Adjust by relative CPU power of the group */
4504 	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
4505 
4506 	/*
4507 	 * Consider the group unbalanced when the imbalance is larger
4508 	 * than the average weight of a task.
4509 	 *
4510 	 * APZ: with cgroup the avg task weight can vary wildly and
4511 	 *      might not be a suitable number - should we keep a
4512 	 *      normalized nr_running number somewhere that negates
4513 	 *      the hierarchy?
4514 	 */
4515 	if (sgs->sum_nr_running)
4516 		avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
4517 
4518 	if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
4519 	    (max_nr_running - min_nr_running) > 1)
4520 		sgs->group_imb = 1;
4521 
4522 	sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
4523 						SCHED_POWER_SCALE);
4524 	if (!sgs->group_capacity)
4525 		sgs->group_capacity = fix_small_capacity(env->sd, group);
4526 	sgs->group_weight = group->group_weight;
4527 
4528 	if (sgs->group_capacity > sgs->sum_nr_running)
4529 		sgs->group_has_capacity = 1;
4530 }
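/*
 * Example of the group_imb test above (numbers invented): a two-cpu remote
 * group where one cpu carries load 3072 with 3 tasks and the other carries 0
 * with 0 tasks has max_cpu_load - min_cpu_load = 3072, avg_load_per_task =
 * 3072 / 3 = 1024 and an nr_running spread of 3.  Both conditions hold, so
 * the group is flagged imbalanced and find_busiest_group() and
 * calculate_imbalance() treat it specially (force_balance, capped
 * busiest_load_per_task).
 */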
4531 
4532 /**
4533  * update_sd_pick_busiest - return 1 on busiest group
4534  * @env: The load balancing environment.
4535  * @sds: sched_domain statistics
4536  * @sg: sched_group candidate to be checked for being the busiest
4537  * @sgs: sched_group statistics
4538  *
4539  * Determine if @sg is a busier group than the previously selected
4540  * busiest group.
4541  */
4542 static bool update_sd_pick_busiest(struct lb_env *env,
4543 				   struct sd_lb_stats *sds,
4544 				   struct sched_group *sg,
4545 				   struct sg_lb_stats *sgs)
4546 {
4547 	if (sgs->avg_load <= sds->max_load)
4548 		return false;
4549 
4550 	if (sgs->sum_nr_running > sgs->group_capacity)
4551 		return true;
4552 
4553 	if (sgs->group_imb)
4554 		return true;
4555 
4556 	/*
4557 	 * ASYM_PACKING needs to move all the work to the lowest
4558 	 * numbered CPUs in the group, therefore mark all groups
4559 	 * higher than ourself as busy.
4560 	 */
4561 	if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
4562 	    env->dst_cpu < group_first_cpu(sg)) {
4563 		if (!sds->busiest)
4564 			return true;
4565 
4566 		if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
4567 			return true;
4568 	}
4569 
4570 	return false;
4571 }
4572 
4573 /**
4574  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
4575  * @env: The load balancing environment.
4576  * @balance: Should we balance.
4577  * @sds: variable to hold the statistics for this sched_domain.
4578  */
4579 static inline void update_sd_lb_stats(struct lb_env *env,
4580 					int *balance, struct sd_lb_stats *sds)
4581 {
4582 	struct sched_domain *child = env->sd->child;
4583 	struct sched_group *sg = env->sd->groups;
4584 	struct sg_lb_stats sgs;
4585 	int load_idx, prefer_sibling = 0;
4586 
4587 	if (child && child->flags & SD_PREFER_SIBLING)
4588 		prefer_sibling = 1;
4589 
4590 	load_idx = get_sd_load_idx(env->sd, env->idle);
4591 
4592 	do {
4593 		int local_group;
4594 
4595 		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
4596 		memset(&sgs, 0, sizeof(sgs));
4597 		update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
4598 
4599 		if (local_group && !(*balance))
4600 			return;
4601 
4602 		sds->total_load += sgs.group_load;
4603 		sds->total_pwr += sg->sgp->power;
4604 
4605 		/*
4606 		 * In case the child domain prefers tasks go to siblings
4607 		 * first, lower the sg capacity to one so that we'll try
4608 		 * and move all the excess tasks away. We lower the capacity
4609 		 * of a group only if the local group has the capacity to fit
4610 		 * these excess tasks, i.e. nr_running < group_capacity. The
4611 		 * extra check prevents the case where you always pull from the
4612 		 * heaviest group when it is already under-utilized (possible
4613 		 * when a single large-weight task outweighs the tasks on the system).
4614 		 */
4615 		if (prefer_sibling && !local_group && sds->this_has_capacity)
4616 			sgs.group_capacity = min(sgs.group_capacity, 1UL);
4617 
4618 		if (local_group) {
4619 			sds->this_load = sgs.avg_load;
4620 			sds->this = sg;
4621 			sds->this_nr_running = sgs.sum_nr_running;
4622 			sds->this_load_per_task = sgs.sum_weighted_load;
4623 			sds->this_has_capacity = sgs.group_has_capacity;
4624 			sds->this_idle_cpus = sgs.idle_cpus;
4625 		} else if (update_sd_pick_busiest(env, sds, sg, &sgs)) {
4626 			sds->max_load = sgs.avg_load;
4627 			sds->busiest = sg;
4628 			sds->busiest_nr_running = sgs.sum_nr_running;
4629 			sds->busiest_idle_cpus = sgs.idle_cpus;
4630 			sds->busiest_group_capacity = sgs.group_capacity;
4631 			sds->busiest_load_per_task = sgs.sum_weighted_load;
4632 			sds->busiest_has_capacity = sgs.group_has_capacity;
4633 			sds->busiest_group_weight = sgs.group_weight;
4634 			sds->group_imb = sgs.group_imb;
4635 		}
4636 
4637 		sg = sg->next;
4638 	} while (sg != env->sd->groups);
4639 }
4640 
4641 /**
4642  * check_asym_packing - Check to see if the group is packed into the
4643  *			sched domain.
4644  *
4645  * This is primarily intended to be used at the sibling level.  Some
4646  * cores like POWER7 prefer to use lower numbered SMT threads.  In the
4647  * case of POWER7, it can move to lower SMT modes only when higher
4648  * threads are idle.  When in lower SMT modes, the threads will
4649  * perform better since they share less core resources.  Hence when we
4650  * have idle threads, we want them to be the higher ones.
4651  *
4652  * This packing function is run on idle threads.  It checks to see if
4653  * the busiest CPU in this domain (core in the P7 case) has a higher
4654  * CPU number than the packing function is being run on.  Here we are
4655  * assuming a lower CPU number will be equivalent to a lower SMT thread
4656  * number.
4657  *
4658  * Returns 1 when packing is required and a task should be moved to
4659  * this CPU.  The amount of the imbalance is returned in *imbalance.
4660  *
4661  * @env: The load balancing environment.
4662  * @sds: Statistics of the sched_domain which is to be packed
4663  */
4664 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
4665 {
4666 	int busiest_cpu;
4667 
4668 	if (!(env->sd->flags & SD_ASYM_PACKING))
4669 		return 0;
4670 
4671 	if (!sds->busiest)
4672 		return 0;
4673 
4674 	busiest_cpu = group_first_cpu(sds->busiest);
4675 	if (env->dst_cpu > busiest_cpu)
4676 		return 0;
4677 
4678 	env->imbalance = DIV_ROUND_CLOSEST(
4679 		sds->max_load * sds->busiest->sgp->power, SCHED_POWER_SCALE);
4680 
4681 	return 1;
4682 }
4683 
4684 /**
4685  * fix_small_imbalance - Calculate the minor imbalance that exists
4686  *			amongst the groups of a sched_domain, during
4687  *			load balancing.
4688  * @env: The load balancing environment.
4689  * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
4690  */
4691 static inline
4692 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
4693 {
4694 	unsigned long tmp, pwr_now = 0, pwr_move = 0;
4695 	unsigned int imbn = 2;
4696 	unsigned long scaled_busy_load_per_task;
4697 
4698 	if (sds->this_nr_running) {
4699 		sds->this_load_per_task /= sds->this_nr_running;
4700 		if (sds->busiest_load_per_task >
4701 				sds->this_load_per_task)
4702 			imbn = 1;
4703 	} else {
4704 		sds->this_load_per_task =
4705 			cpu_avg_load_per_task(env->dst_cpu);
4706 	}
4707 
4708 	scaled_busy_load_per_task = sds->busiest_load_per_task
4709 					 * SCHED_POWER_SCALE;
4710 	scaled_busy_load_per_task /= sds->busiest->sgp->power;
4711 
4712 	if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
4713 			(scaled_busy_load_per_task * imbn)) {
4714 		env->imbalance = sds->busiest_load_per_task;
4715 		return;
4716 	}
4717 
4718 	/*
4719 	 * OK, we don't have enough imbalance to justify moving tasks,
4720 	 * however we may be able to increase total CPU power used by
4721 	 * moving them.
4722 	 */
4723 
4724 	pwr_now += sds->busiest->sgp->power *
4725 			min(sds->busiest_load_per_task, sds->max_load);
4726 	pwr_now += sds->this->sgp->power *
4727 			min(sds->this_load_per_task, sds->this_load);
4728 	pwr_now /= SCHED_POWER_SCALE;
4729 
4730 	/* Amount of load we'd subtract */
4731 	tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
4732 		sds->busiest->sgp->power;
4733 	if (sds->max_load > tmp)
4734 		pwr_move += sds->busiest->sgp->power *
4735 			min(sds->busiest_load_per_task, sds->max_load - tmp);
4736 
4737 	/* Amount of load we'd add */
4738 	if (sds->max_load * sds->busiest->sgp->power <
4739 		sds->busiest_load_per_task * SCHED_POWER_SCALE)
4740 		tmp = (sds->max_load * sds->busiest->sgp->power) /
4741 			sds->this->sgp->power;
4742 	else
4743 		tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
4744 			sds->this->sgp->power;
4745 	pwr_move += sds->this->sgp->power *
4746 			min(sds->this_load_per_task, sds->this_load + tmp);
4747 	pwr_move /= SCHED_POWER_SCALE;
4748 
4749 	/* Move if we gain throughput */
4750 	if (pwr_move > pwr_now)
4751 		env->imbalance = sds->busiest_load_per_task;
4752 }
4753 
4754 /**
4755  * calculate_imbalance - Calculate the amount of imbalance present within the
4756  *			 groups of a given sched_domain during load balance.
4757  * @env: load balance environment
4758  * @sds: statistics of the sched_domain whose imbalance is to be calculated.
4759  */
4760 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
4761 {
4762 	unsigned long max_pull, load_above_capacity = ~0UL;
4763 
4764 	sds->busiest_load_per_task /= sds->busiest_nr_running;
4765 	if (sds->group_imb) {
4766 		sds->busiest_load_per_task =
4767 			min(sds->busiest_load_per_task, sds->avg_load);
4768 	}
4769 
4770 	/*
4771 	 * In the presence of smp nice balancing, certain scenarios can have
4772 	 * max load less than avg load(as we skip the groups at or below
4773 	 * its cpu_power, while calculating max_load..)
4774 	 */
4775 	if (sds->max_load < sds->avg_load) {
4776 		env->imbalance = 0;
4777 		return fix_small_imbalance(env, sds);
4778 	}
4779 
4780 	if (!sds->group_imb) {
4781 		/*
4782 		 * Don't want to pull so many tasks that a group would go idle.
4783 		 */
4784 		load_above_capacity = (sds->busiest_nr_running -
4785 						sds->busiest_group_capacity);
4786 
4787 		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
4788 
4789 		load_above_capacity /= sds->busiest->sgp->power;
4790 	}
4791 
4792 	/*
4793 	 * We're trying to get all the cpus to the average_load, so we don't
4794 	 * want to push ourselves above the average load, nor do we wish to
4795 	 * reduce the max loaded cpu below the average load. At the same time,
4796 	 * we also don't want to reduce the group load below the group capacity
4797 	 * (so that we can implement power-savings policies etc). Thus we look
4798 	 * for the minimum possible imbalance.
4799 	 * Be careful of negative numbers as they'll appear as very large values
4800 	 * with unsigned longs.
4801 	 */
4802 	max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
4803 
4804 	/* How much load to actually move to equalise the imbalance */
4805 	env->imbalance = min(max_pull * sds->busiest->sgp->power,
4806 		(sds->avg_load - sds->this_load) * sds->this->sgp->power)
4807 			/ SCHED_POWER_SCALE;
4808 
4809 	/*
4810 	 * if *imbalance is less than the average load per runnable task
4811 	 * there is no guarantee that any tasks will be moved so we'll have
4812 	 * a think about bumping its value to force at least one task to be
4813 	 * moved
4814 	 */
4815 	if (env->imbalance < sds->busiest_load_per_task)
4816 		return fix_small_imbalance(env, sds);
4817 
4818 }
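/*
 * Worked example (invented numbers): with sds->avg_load = 2048, a busiest
 * group at max_load = 3072 and a local group at this_load = 1024, both of
 * power SCHED_POWER_SCALE (1024), and load_above_capacity not limiting:
 *
 *   max_pull  = min(3072 - 2048, load_above_capacity) = 1024
 *   imbalance = min(1024 * 1024, (2048 - 1024) * 1024) / 1024 = 1024
 *
 * i.e. move 1024 of weighted load, which lands both groups exactly on the
 * domain average.
 */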
4819 
4820 /******* find_busiest_group() helpers end here *********************/
4821 
4822 /**
4823  * find_busiest_group - Returns the busiest group within the sched_domain
4824  * if there is an imbalance. If there isn't an imbalance, and
4825  * the user has opted for power-savings, it returns a group whose
4826  * CPUs can be put to idle by rebalancing those tasks elsewhere, if
4827  * such a group exists.
4828  *
4829  * Also calculates the amount of weighted load which should be moved
4830  * to restore balance.
4831  *
4832  * @env: The load balancing environment.
4833  * @balance: Pointer to a variable indicating if this_cpu
4834  *	is the appropriate cpu to perform load balancing at this_level.
4835  *
4836  * Returns:	- the busiest group if imbalance exists.
4837  *		- If no imbalance and user has opted for power-savings balance,
4838  *		   return the least loaded group whose CPUs can be
4839  *		   put to idle by rebalancing its tasks onto our group.
4840  */
4841 static struct sched_group *
4842 find_busiest_group(struct lb_env *env, int *balance)
4843 {
4844 	struct sd_lb_stats sds;
4845 
4846 	memset(&sds, 0, sizeof(sds));
4847 
4848 	/*
4849 	 * Compute the various statistics relevant for load balancing at
4850 	 * this level.
4851 	 */
4852 	update_sd_lb_stats(env, balance, &sds);
4853 
4854 	/*
4855 	 * this_cpu is not the appropriate cpu to perform load balancing at
4856 	 * this level.
4857 	 */
4858 	if (!(*balance))
4859 		goto ret;
4860 
4861 	if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
4862 	    check_asym_packing(env, &sds))
4863 		return sds.busiest;
4864 
4865 	/* There is no busy sibling group to pull tasks from */
4866 	if (!sds.busiest || sds.busiest_nr_running == 0)
4867 		goto out_balanced;
4868 
4869 	sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
4870 
4871 	/*
4872 	 * If the busiest group is imbalanced the below checks don't
4873 	 * work because they assume all things are equal, which typically
4874 	 * isn't true due to cpus_allowed constraints and the like.
4875 	 */
4876 	if (sds.group_imb)
4877 		goto force_balance;
4878 
4879 	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
4880 	if (env->idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
4881 			!sds.busiest_has_capacity)
4882 		goto force_balance;
4883 
4884 	/*
4885 	 * If the local group is more busy than the selected busiest group
4886 	 * don't try and pull any tasks.
4887 	 */
4888 	if (sds.this_load >= sds.max_load)
4889 		goto out_balanced;
4890 
4891 	/*
4892 	 * Don't pull any tasks if this group is already above the domain
4893 	 * average load.
4894 	 */
4895 	if (sds.this_load >= sds.avg_load)
4896 		goto out_balanced;
4897 
4898 	if (env->idle == CPU_IDLE) {
4899 		/*
4900 		 * This cpu is idle. If the busiest group doesn't have
4901 		 * more tasks than the number of available cpus, and
4902 		 * there is no imbalance between this and the busiest group
4903 		 * wrt idle cpus, it is balanced.
4904 		 */
4905 		if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
4906 		    sds.busiest_nr_running <= sds.busiest_group_weight)
4907 			goto out_balanced;
4908 	} else {
4909 		/*
4910 		 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
4911 		 * imbalance_pct to be conservative.
4912 		 */
4913 		if (100 * sds.max_load <= env->sd->imbalance_pct * sds.this_load)
4914 			goto out_balanced;
4915 	}
4916 
4917 force_balance:
4918 	/* Looks like there is an imbalance. Compute it */
4919 	calculate_imbalance(env, &sds);
4920 	return sds.busiest;
4921 
4922 out_balanced:
4923 ret:
4924 	env->imbalance = 0;
4925 	return NULL;
4926 }
4927 
4928 /*
4929  * find_busiest_queue - find the busiest runqueue among the cpus in group.
4930  */
4931 static struct rq *find_busiest_queue(struct lb_env *env,
4932 				     struct sched_group *group)
4933 {
4934 	struct rq *busiest = NULL, *rq;
4935 	unsigned long max_load = 0;
4936 	int i;
4937 
4938 	for_each_cpu(i, sched_group_cpus(group)) {
4939 		unsigned long power = power_of(i);
4940 		unsigned long capacity = DIV_ROUND_CLOSEST(power,
4941 							   SCHED_POWER_SCALE);
4942 		unsigned long wl;
4943 
4944 		if (!capacity)
4945 			capacity = fix_small_capacity(env->sd, group);
4946 
4947 		if (!cpumask_test_cpu(i, env->cpus))
4948 			continue;
4949 
4950 		rq = cpu_rq(i);
4951 		wl = weighted_cpuload(i);
4952 
4953 		/*
4954 		 * When comparing with imbalance, use weighted_cpuload()
4955 		 * which is not scaled with the cpu power.
4956 		 */
4957 		if (capacity && rq->nr_running == 1 && wl > env->imbalance)
4958 			continue;
4959 
4960 		/*
4961 		 * For the load comparisons with the other cpu's, consider
4962 		 * the weighted_cpuload() scaled with the cpu power, so that
4963 		 * the load can be moved away from the cpu that is potentially
4964 		 * running at a lower capacity.
4965 		 */
4966 		wl = (wl * SCHED_POWER_SCALE) / power;
4967 
4968 		if (wl > max_load) {
4969 			max_load = wl;
4970 			busiest = rq;
4971 		}
4972 	}
4973 
4974 	return busiest;
4975 }
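/*
 * Example of the power scaling above (invented numbers): cpu A has wl = 2048
 * at power = 1024, cpu B has the same wl = 2048 but power = 512 (e.g. heavy
 * RT/irq load).  After wl = wl * SCHED_POWER_SCALE / power, A scores 2048
 * and B scores 4096, so B is chosen as busiest: the same weighted load hurts
 * more on the cpu with less capacity left to run it.
 */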
4976 
4977 /*
4978  * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
4979  * so long as it is large enough.
4980  */
4981 #define MAX_PINNED_INTERVAL	512
4982 
4983 /* Working cpumask for load_balance and load_balance_newidle. */
4984 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
4985 
4986 static int need_active_balance(struct lb_env *env)
4987 {
4988 	struct sched_domain *sd = env->sd;
4989 
4990 	if (env->idle == CPU_NEWLY_IDLE) {
4991 
4992 		/*
4993 		 * ASYM_PACKING needs to force migrate tasks from busy but
4994 		 * higher numbered CPUs in order to pack all tasks in the
4995 		 * lowest numbered CPUs.
4996 		 */
4997 		if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
4998 			return 1;
4999 	}
5000 
5001 	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
5002 }
5003 
5004 static int active_load_balance_cpu_stop(void *data);
5005 
5006 /*
5007  * Check this_cpu to ensure it is balanced within domain. Attempt to move
5008  * tasks if there is an imbalance.
5009  */
5010 static int load_balance(int this_cpu, struct rq *this_rq,
5011 			struct sched_domain *sd, enum cpu_idle_type idle,
5012 			int *balance)
5013 {
5014 	int ld_moved, cur_ld_moved, active_balance = 0;
5015 	struct sched_group *group;
5016 	struct rq *busiest;
5017 	unsigned long flags;
5018 	struct cpumask *cpus = __get_cpu_var(load_balance_mask);
5019 
5020 	struct lb_env env = {
5021 		.sd		= sd,
5022 		.dst_cpu	= this_cpu,
5023 		.dst_rq		= this_rq,
5024 		.dst_grpmask    = sched_group_cpus(sd->groups),
5025 		.idle		= idle,
5026 		.loop_break	= sched_nr_migrate_break,
5027 		.cpus		= cpus,
5028 	};
5029 
5030 	/*
5031 	 * For NEWLY_IDLE load_balancing, we don't need to consider
5032 	 * other cpus in our group
5033 	 */
5034 	if (idle == CPU_NEWLY_IDLE)
5035 		env.dst_grpmask = NULL;
5036 
5037 	cpumask_copy(cpus, cpu_active_mask);
5038 
5039 	schedstat_inc(sd, lb_count[idle]);
5040 
5041 redo:
5042 	group = find_busiest_group(&env, balance);
5043 
5044 	if (*balance == 0)
5045 		goto out_balanced;
5046 
5047 	if (!group) {
5048 		schedstat_inc(sd, lb_nobusyg[idle]);
5049 		goto out_balanced;
5050 	}
5051 
5052 	busiest = find_busiest_queue(&env, group);
5053 	if (!busiest) {
5054 		schedstat_inc(sd, lb_nobusyq[idle]);
5055 		goto out_balanced;
5056 	}
5057 
5058 	BUG_ON(busiest == env.dst_rq);
5059 
5060 	schedstat_add(sd, lb_imbalance[idle], env.imbalance);
5061 
5062 	ld_moved = 0;
5063 	if (busiest->nr_running > 1) {
5064 		/*
5065 		 * Attempt to move tasks. If find_busiest_group has found
5066 		 * an imbalance but busiest->nr_running <= 1, the group is
5067 		 * still unbalanced. ld_moved simply stays zero, so it is
5068 		 * correctly treated as an imbalance.
5069 		 */
5070 		env.flags |= LBF_ALL_PINNED;
5071 		env.src_cpu   = busiest->cpu;
5072 		env.src_rq    = busiest;
5073 		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
5074 
5075 		update_h_load(env.src_cpu);
5076 more_balance:
5077 		local_irq_save(flags);
5078 		double_rq_lock(env.dst_rq, busiest);
5079 
5080 		/*
5081 		 * cur_ld_moved - load moved in current iteration
5082 		 * ld_moved     - cumulative load moved across iterations
5083 		 */
5084 		cur_ld_moved = move_tasks(&env);
5085 		ld_moved += cur_ld_moved;
5086 		double_rq_unlock(env.dst_rq, busiest);
5087 		local_irq_restore(flags);
5088 
5089 		/*
5090 		 * some other cpu did the load balance for us.
5091 		 */
5092 		if (cur_ld_moved && env.dst_cpu != smp_processor_id())
5093 			resched_cpu(env.dst_cpu);
5094 
5095 		if (env.flags & LBF_NEED_BREAK) {
5096 			env.flags &= ~LBF_NEED_BREAK;
5097 			goto more_balance;
5098 		}
5099 
5100 		/*
5101 		 * Revisit (affine) tasks on src_cpu that couldn't be moved to
5102 		 * us and move them to an alternate dst_cpu in our sched_group
5103 		 * where they can run. The upper limit on how many times we
5104 		 * iterate on the same src_cpu depends on the number of cpus in
5105 		 * our sched_group.
5106 		 *
5107 		 * This changes load balance semantics a bit on who can move
5108 		 * load to a given_cpu. In addition to the given_cpu itself
5109 		 * (or an ilb_cpu acting on its behalf where given_cpu is
5110 		 * nohz-idle), we now have balance_cpu in a position to move
5111 		 * load to given_cpu. In rare situations, this may cause
5112 		 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
5113 		 * _independently_ and at the _same_ time to move some load to
5114 		 * given_cpu), causing excess load to be moved to given_cpu.
5115 		 * This, however, should rarely happen in practice, and
5116 		 * moreover subsequent load balance cycles should correct the
5117 		 * excess load moved.
5118 		 */
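		/*
		 * Illustrative scenario (cpu numbers assumed): dst_cpu 0 and
		 * cpu 1 share a sched_group, and some tasks on src_cpu are
		 * allowed to run only on cpu 1. move_tasks() then sets
		 * LBF_SOME_PINNED and records new_dst_cpu = 1, so the retry
		 * below re-runs the same src_cpu scan with cpu 1 as the
		 * destination instead of giving up.
		 */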
5119 		if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
5120 
5121 			env.dst_rq	 = cpu_rq(env.new_dst_cpu);
5122 			env.dst_cpu	 = env.new_dst_cpu;
5123 			env.flags	&= ~LBF_SOME_PINNED;
5124 			env.loop	 = 0;
5125 			env.loop_break	 = sched_nr_migrate_break;
5126 
5127 			/* Prevent dst_cpu from being re-selected via env's cpus */
5128 			cpumask_clear_cpu(env.dst_cpu, env.cpus);
5129 
5130 			/*
5131 			 * Go back to "more_balance" rather than "redo" since we
5132 			 * need to continue with same src_cpu.
5133 			 */
5134 			goto more_balance;
5135 		}
5136 
5137 		/* All tasks on this runqueue were pinned by CPU affinity */
5138 		if (unlikely(env.flags & LBF_ALL_PINNED)) {
5139 			cpumask_clear_cpu(cpu_of(busiest), cpus);
5140 			if (!cpumask_empty(cpus)) {
5141 				env.loop = 0;
5142 				env.loop_break = sched_nr_migrate_break;
5143 				goto redo;
5144 			}
5145 			goto out_balanced;
5146 		}
5147 	}
5148 
5149 	if (!ld_moved) {
5150 		schedstat_inc(sd, lb_failed[idle]);
5151 		/*
5152 		 * Increment the failure counter only on periodic balance.
5153 		 * We do not want newidle balance, which can be very
5154 		 * frequent, to pollute the failure counter, causing
5155 		 * excessive cache_hot migrations and active balances.
5156 		 */
5157 		if (idle != CPU_NEWLY_IDLE)
5158 			sd->nr_balance_failed++;
5159 
5160 		if (need_active_balance(&env)) {
5161 			raw_spin_lock_irqsave(&busiest->lock, flags);
5162 
5163 			/* don't kick the active_load_balance_cpu_stop
5164 			 * if the curr task on the busiest cpu can't be
5165 			 * moved to this_cpu
5166 			 */
5167 			if (!cpumask_test_cpu(this_cpu,
5168 					tsk_cpus_allowed(busiest->curr))) {
5169 				raw_spin_unlock_irqrestore(&busiest->lock,
5170 							    flags);
5171 				env.flags |= LBF_ALL_PINNED;
5172 				goto out_one_pinned;
5173 			}
5174 
5175 			/*
5176 			 * ->active_balance synchronizes accesses to
5177 			 * ->active_balance_work.  Once set, it's cleared
5178 			 * only after active load balance is finished.
5179 			 */
5180 			if (!busiest->active_balance) {
5181 				busiest->active_balance = 1;
5182 				busiest->push_cpu = this_cpu;
5183 				active_balance = 1;
5184 			}
5185 			raw_spin_unlock_irqrestore(&busiest->lock, flags);
5186 
5187 			if (active_balance) {
5188 				stop_one_cpu_nowait(cpu_of(busiest),
5189 					active_load_balance_cpu_stop, busiest,
5190 					&busiest->active_balance_work);
5191 			}
5192 
5193 			/*
5194 			 * We've kicked active balancing; reset the failure
5195 			 * counter.
5196 			 */
5197 			sd->nr_balance_failed = sd->cache_nice_tries+1;
5198 		}
5199 	} else
5200 		sd->nr_balance_failed = 0;
5201 
5202 	if (likely(!active_balance)) {
5203 		/* We were unbalanced, so reset the balancing interval */
5204 		sd->balance_interval = sd->min_interval;
5205 	} else {
5206 		/*
5207 		 * If we've begun active balancing, start to back off. This
5208 		 * case may not be covered by the all_pinned logic if there
5209 		 * is only 1 task on the busy runqueue (because we don't call
5210 		 * move_tasks).
5211 		 */
5212 		if (sd->balance_interval < sd->max_interval)
5213 			sd->balance_interval *= 2;
5214 	}
5215 
5216 	goto out;
5217 
5218 out_balanced:
5219 	schedstat_inc(sd, lb_balanced[idle]);
5220 
5221 	sd->nr_balance_failed = 0;
5222 
5223 out_one_pinned:
5224 	/* tune up the balancing interval */
5225 	if (((env.flags & LBF_ALL_PINNED) &&
5226 			sd->balance_interval < MAX_PINNED_INTERVAL) ||
5227 			(sd->balance_interval < sd->max_interval))
5228 		sd->balance_interval *= 2;
5229 
5230 	ld_moved = 0;
5231 out:
5232 	return ld_moved;
5233 }
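/*
 * Illustrative backoff (interval values assumed): a pass that finds nothing
 * to do (out_balanced) or only pinned tasks (out_one_pinned) doubles
 * sd->balance_interval, e.g. 8ms -> 16ms -> 32ms, bounded by max_interval
 * (or MAX_PINNED_INTERVAL while everything stays pinned), so futile scans
 * quickly become rare; a pass that reaches the normal exit without kicking
 * active balancing resets the interval to min_interval.
 */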
5234 
5235 /*
5236  * idle_balance is called by schedule() if this_cpu is about to become
5237  * idle. Attempts to pull tasks from other CPUs.
5238  */
5239 void idle_balance(int this_cpu, struct rq *this_rq)
5240 {
5241 	struct sched_domain *sd;
5242 	int pulled_task = 0;
5243 	unsigned long next_balance = jiffies + HZ;
5244 
5245 	this_rq->idle_stamp = this_rq->clock;
5246 
5247 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
5248 		return;
5249 
5250 	/*
5251 	 * Drop the rq->lock, but keep IRQ/preempt disabled.
5252 	 */
5253 	raw_spin_unlock(&this_rq->lock);
5254 
5255 	update_blocked_averages(this_cpu);
5256 	rcu_read_lock();
5257 	for_each_domain(this_cpu, sd) {
5258 		unsigned long interval;
5259 		int balance = 1;
5260 
5261 		if (!(sd->flags & SD_LOAD_BALANCE))
5262 			continue;
5263 
5264 		if (sd->flags & SD_BALANCE_NEWIDLE) {
5265 			/* If we've pulled tasks over, stop searching: */
5266 			pulled_task = load_balance(this_cpu, this_rq,
5267 						   sd, CPU_NEWLY_IDLE, &balance);
5268 		}
5269 
5270 		interval = msecs_to_jiffies(sd->balance_interval);
5271 		if (time_after(next_balance, sd->last_balance + interval))
5272 			next_balance = sd->last_balance + interval;
5273 		if (pulled_task) {
5274 			this_rq->idle_stamp = 0;
5275 			break;
5276 		}
5277 	}
5278 	rcu_read_unlock();
5279 
5280 	raw_spin_lock(&this_rq->lock);
5281 
5282 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
5283 		/*
5284 		 * We are going idle. next_balance may be set based on
5285 		 * a busy processor. So reset next_balance.
5286 		 */
5287 		this_rq->next_balance = next_balance;
5288 	}
5289 }
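/*
 * Illustrative gate (default value assumed): with sysctl_sched_migration_cost
 * around 0.5ms, a CPU whose recent average idle period (rq->avg_idle) is
 * shorter than that skips newidle balancing entirely, on the assumption that
 * pulling a task would cost more than the idle gap it is trying to fill.
 */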
5290 
5291 /*
5292  * active_load_balance_cpu_stop is run by cpu stopper. It pushes
5293  * running tasks off the busiest CPU onto idle CPUs. It requires at
5294  * least 1 task to be running on each physical CPU where possible, and
5295  * avoids physical / logical imbalances.
5296  */
5297 static int active_load_balance_cpu_stop(void *data)
5298 {
5299 	struct rq *busiest_rq = data;
5300 	int busiest_cpu = cpu_of(busiest_rq);
5301 	int target_cpu = busiest_rq->push_cpu;
5302 	struct rq *target_rq = cpu_rq(target_cpu);
5303 	struct sched_domain *sd;
5304 
5305 	raw_spin_lock_irq(&busiest_rq->lock);
5306 
5307 	/* make sure the requested cpu hasn't gone down in the meantime */
5308 	if (unlikely(busiest_cpu != smp_processor_id() ||
5309 		     !busiest_rq->active_balance))
5310 		goto out_unlock;
5311 
5312 	/* Is there any task to move? */
5313 	if (busiest_rq->nr_running <= 1)
5314 		goto out_unlock;
5315 
5316 	/*
5317 	 * This condition is "impossible"; if it occurs
5318 	 * we need to fix it. Originally reported by
5319 	 * Bjorn Helgaas on a 128-cpu setup.
5320 	 */
5321 	BUG_ON(busiest_rq == target_rq);
5322 
5323 	/* move a task from busiest_rq to target_rq */
5324 	double_lock_balance(busiest_rq, target_rq);
5325 
5326 	/* Search for an sd spanning us and the target CPU. */
5327 	rcu_read_lock();
5328 	for_each_domain(target_cpu, sd) {
5329 		if ((sd->flags & SD_LOAD_BALANCE) &&
5330 		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
5331 				break;
5332 	}
5333 
5334 	if (likely(sd)) {
5335 		struct lb_env env = {
5336 			.sd		= sd,
5337 			.dst_cpu	= target_cpu,
5338 			.dst_rq		= target_rq,
5339 			.src_cpu	= busiest_rq->cpu,
5340 			.src_rq		= busiest_rq,
5341 			.idle		= CPU_IDLE,
5342 		};
5343 
5344 		schedstat_inc(sd, alb_count);
5345 
5346 		if (move_one_task(&env))
5347 			schedstat_inc(sd, alb_pushed);
5348 		else
5349 			schedstat_inc(sd, alb_failed);
5350 	}
5351 	rcu_read_unlock();
5352 	double_unlock_balance(busiest_rq, target_rq);
5353 out_unlock:
5354 	busiest_rq->active_balance = 0;
5355 	raw_spin_unlock_irq(&busiest_rq->lock);
5356 	return 0;
5357 }
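/*
 * Invocation sketch (cpu numbers assumed): when load_balance() on cpu 0 keeps
 * failing against a busy cpu 3, it sets rq(3)->push_cpu = 0 and queues this
 * callback on cpu 3's stopper thread via stop_one_cpu_nowait(). Because the
 * stopper preempts whatever was running on cpu 3, the formerly running task
 * is now merely queued and becomes eligible for move_one_task() to push it
 * over to cpu 0.
 */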
5358 
5359 #ifdef CONFIG_NO_HZ_COMMON
5360 /*
5361  * idle load balancing details
5362  * - When one of the busy CPUs notices that idle rebalancing may be
5363  *   needed, it will kick the idle load balancer, which then does idle
5364  *   load balancing for all the idle CPUs.
5365  */
5366 static struct {
5367 	cpumask_var_t idle_cpus_mask;
5368 	atomic_t nr_cpus;
5369 	unsigned long next_balance;     /* in jiffy units */
5370 } nohz ____cacheline_aligned;
5371 
5372 static inline int find_new_ilb(int call_cpu)
5373 {
5374 	int ilb = cpumask_first(nohz.idle_cpus_mask);
5375 
5376 	if (ilb < nr_cpu_ids && idle_cpu(ilb))
5377 		return ilb;
5378 
5379 	return nr_cpu_ids;
5380 }
5381 
5382 /*
5383  * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
5384  * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
5385  * CPU (if there is one).
5386  */
5387 static void nohz_balancer_kick(int cpu)
5388 {
5389 	int ilb_cpu;
5390 
5391 	nohz.next_balance++;
5392 
5393 	ilb_cpu = find_new_ilb(cpu);
5394 
5395 	if (ilb_cpu >= nr_cpu_ids)
5396 		return;
5397 
5398 	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
5399 		return;
5400 	/*
5401 	 * Use smp_send_reschedule() instead of resched_cpu().
5402 	 * This way we generate a sched IPI on the target cpu which
5403 	 * is idle. And the softirq performing nohz idle load balance
5404 	 * will be run before returning from the IPI.
5405 	 */
5406 	smp_send_reschedule(ilb_cpu);
5407 	return;
5408 }
5409 
5410 static inline void nohz_balance_exit_idle(int cpu)
5411 {
5412 	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
5413 		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
5414 		atomic_dec(&nohz.nr_cpus);
5415 		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5416 	}
5417 }
5418 
5419 static inline void set_cpu_sd_state_busy(void)
5420 {
5421 	struct sched_domain *sd;
5422 	int cpu = smp_processor_id();
5423 
5424 	rcu_read_lock();
5425 	sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd);
5426 
5427 	if (!sd || !sd->nohz_idle)
5428 		goto unlock;
5429 	sd->nohz_idle = 0;
5430 
5431 	for (; sd; sd = sd->parent)
5432 		atomic_inc(&sd->groups->sgp->nr_busy_cpus);
5433 unlock:
5434 	rcu_read_unlock();
5435 }
5436 
5437 void set_cpu_sd_state_idle(void)
5438 {
5439 	struct sched_domain *sd;
5440 	int cpu = smp_processor_id();
5441 
5442 	rcu_read_lock();
5443 	sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd);
5444 
5445 	if (!sd || sd->nohz_idle)
5446 		goto unlock;
5447 	sd->nohz_idle = 1;
5448 
5449 	for (; sd; sd = sd->parent)
5450 		atomic_dec(&sd->groups->sgp->nr_busy_cpus);
5451 unlock:
5452 	rcu_read_unlock();
5453 }
5454 
5455 /*
5456  * This routine will record that the cpu is going idle with tick stopped.
5457  * This info will be used in performing idle load balancing in the future.
5458  */
5459 void nohz_balance_enter_idle(int cpu)
5460 {
5461 	/*
5462 	 * If this cpu is going down, then nothing needs to be done.
5463 	 */
5464 	if (!cpu_active(cpu))
5465 		return;
5466 
5467 	if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
5468 		return;
5469 
5470 	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
5471 	atomic_inc(&nohz.nr_cpus);
5472 	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5473 }
5474 
5475 static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
5476 					unsigned long action, void *hcpu)
5477 {
5478 	switch (action & ~CPU_TASKS_FROZEN) {
5479 	case CPU_DYING:
5480 		nohz_balance_exit_idle(smp_processor_id());
5481 		return NOTIFY_OK;
5482 	default:
5483 		return NOTIFY_DONE;
5484 	}
5485 }
5486 #endif
5487 
5488 static DEFINE_SPINLOCK(balancing);
5489 
5490 /*
5491  * Scale the max load_balance interval with the number of CPUs in the system.
5492  * This trades load-balance latency on larger machines for less cross talk.
5493  */
5494 void update_max_interval(void)
5495 {
5496 	max_load_balance_interval = HZ*num_online_cpus()/10;
5497 }
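/*
 * Worked example (HZ and cpu count assumed): with HZ=1000 and 8 CPUs online,
 * max_load_balance_interval = 1000 * 8 / 10 = 800 jiffies, i.e. roughly
 * 800ms, which then clamps the per-domain intervals computed in
 * rebalance_domains() below.
 */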
5498 
5499 /*
5500  * It checks each scheduling domain to see if it is due to be balanced,
5501  * and initiates a balancing operation if so.
5502  *
5503  * Balancing parameters are set up in init_sched_domains.
5504  */
5505 static void rebalance_domains(int cpu, enum cpu_idle_type idle)
5506 {
5507 	int balance = 1;
5508 	struct rq *rq = cpu_rq(cpu);
5509 	unsigned long interval;
5510 	struct sched_domain *sd;
5511 	/* Earliest time when we have to do rebalance again */
5512 	unsigned long next_balance = jiffies + 60*HZ;
5513 	int update_next_balance = 0;
5514 	int need_serialize;
5515 
5516 	update_blocked_averages(cpu);
5517 
5518 	rcu_read_lock();
5519 	for_each_domain(cpu, sd) {
5520 		if (!(sd->flags & SD_LOAD_BALANCE))
5521 			continue;
5522 
5523 		interval = sd->balance_interval;
5524 		if (idle != CPU_IDLE)
5525 			interval *= sd->busy_factor;
5526 
5527 		/* scale ms to jiffies */
5528 		interval = msecs_to_jiffies(interval);
5529 		interval = clamp(interval, 1UL, max_load_balance_interval);
5530 
5531 		need_serialize = sd->flags & SD_SERIALIZE;
5532 
5533 		if (need_serialize) {
5534 			if (!spin_trylock(&balancing))
5535 				goto out;
5536 		}
5537 
5538 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
5539 			if (load_balance(cpu, rq, sd, idle, &balance)) {
5540 				/*
5541 				 * The LBF_SOME_PINNED logic could have changed
5542 				 * env->dst_cpu, so we can't know our idle
5543 				 * state even if we migrated tasks. Update it.
5544 				 */
5545 				idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
5546 			}
5547 			sd->last_balance = jiffies;
5548 		}
5549 		if (need_serialize)
5550 			spin_unlock(&balancing);
5551 out:
5552 		if (time_after(next_balance, sd->last_balance + interval)) {
5553 			next_balance = sd->last_balance + interval;
5554 			update_next_balance = 1;
5555 		}
5556 
5557 		/*
5558 		 * Stop the load balance at this level. There is another
5559 		 * CPU in our sched group which is doing load balancing more
5560 		 * actively.
5561 		 */
5562 		if (!balance)
5563 			break;
5564 	}
5565 	rcu_read_unlock();
5566 
5567 	/*
5568 	 * next_balance will be updated only when there is a need.
5569 	 * When the cpu is attached to a null domain, for example, it will not be
5570 	 * updated.
5571 	 */
5572 	if (likely(update_next_balance))
5573 		rq->next_balance = next_balance;
5574 }
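/*
 * Worked example (values assumed): a domain with balance_interval = 8 (ms)
 * and busy_factor = 32 is considered for rebalancing roughly every 8ms while
 * this cpu is idle, but only about every 256ms while it is busy, before the
 * clamp to [1 jiffy, max_load_balance_interval] is applied.
 */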
5575 
5576 #ifdef CONFIG_NO_HZ_COMMON
5577 /*
5578  * In the CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
5579  * rebalancing for all the cpus whose scheduler ticks are stopped.
5580  */
5581 static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
5582 {
5583 	struct rq *this_rq = cpu_rq(this_cpu);
5584 	struct rq *rq;
5585 	int balance_cpu;
5586 
5587 	if (idle != CPU_IDLE ||
5588 	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
5589 		goto end;
5590 
5591 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
5592 		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
5593 			continue;
5594 
5595 		/*
5596 		 * If this cpu gets work to do, stop the load balancing
5597 		 * work being done for other cpus. The next load
5598 		 * balancing owner will pick it up.
5599 		 */
5600 		if (need_resched())
5601 			break;
5602 
5603 		rq = cpu_rq(balance_cpu);
5604 
5605 		raw_spin_lock_irq(&rq->lock);
5606 		update_rq_clock(rq);
5607 		update_idle_cpu_load(rq);
5608 		raw_spin_unlock_irq(&rq->lock);
5609 
5610 		rebalance_domains(balance_cpu, CPU_IDLE);
5611 
5612 		if (time_after(this_rq->next_balance, rq->next_balance))
5613 			this_rq->next_balance = rq->next_balance;
5614 	}
5615 	nohz.next_balance = this_rq->next_balance;
5616 end:
5617 	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
5618 }
5619 
5620 /*
5621  * Current heuristic for kicking the idle load balancer in the presence
5622  * of an idle cpu in the system:
5623  *   - This rq has more than one task.
5624  *   - At any scheduler domain level, this cpu's scheduler group has multiple
5625  *     busy cpus exceeding the group's power.
5626  *   - For SD_ASYM_PACKING, if the lower numbered cpus in the scheduler
5627  *     domain span are idle.
5628  */
5629 static inline int nohz_kick_needed(struct rq *rq, int cpu)
5630 {
5631 	unsigned long now = jiffies;
5632 	struct sched_domain *sd;
5633 
5634 	if (unlikely(idle_cpu(cpu)))
5635 		return 0;
5636 
5637 	/*
5638 	 * We may recently have been in ticked or tickless idle mode. At the
5639 	 * first busy tick after returning from idle, we update the busy stats.
5640 	 */
5641 	set_cpu_sd_state_busy();
5642 	nohz_balance_exit_idle(cpu);
5643 
5644 	/*
5645 	 * None are in tickless mode and hence no need for NOHZ idle load
5646 	 * balancing.
5647 	 */
5648 	if (likely(!atomic_read(&nohz.nr_cpus)))
5649 		return 0;
5650 
5651 	if (time_before(now, nohz.next_balance))
5652 		return 0;
5653 
5654 	if (rq->nr_running >= 2)
5655 		goto need_kick;
5656 
5657 	rcu_read_lock();
5658 	for_each_domain(cpu, sd) {
5659 		struct sched_group *sg = sd->groups;
5660 		struct sched_group_power *sgp = sg->sgp;
5661 		int nr_busy = atomic_read(&sgp->nr_busy_cpus);
5662 
5663 		if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
5664 			goto need_kick_unlock;
5665 
5666 		if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
5667 		    && (cpumask_first_and(nohz.idle_cpus_mask,
5668 					  sched_domain_span(sd)) < cpu))
5669 			goto need_kick_unlock;
5670 
5671 		if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
5672 			break;
5673 	}
5674 	rcu_read_unlock();
5675 	return 0;
5676 
5677 need_kick_unlock:
5678 	rcu_read_unlock();
5679 need_kick:
5680 	return 1;
5681 }
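/*
 * Illustrative trigger (scenario assumed): a busy cpu that picks up a second
 * runnable task while other cpus sit tickless in nohz.idle_cpus_mask passes
 * the nr_running >= 2 check above, so once nohz.next_balance is due its next
 * scheduler tick (via trigger_load_balance()) kicks the ilb cpu.
 */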
5682 #else
5683 static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
5684 #endif
5685 
5686 /*
5687  * run_rebalance_domains is triggered when needed from the scheduler tick.
5688  * Also triggered for nohz idle balancing (with NOHZ_BALANCE_KICK set).
5689  */
5690 static void run_rebalance_domains(struct softirq_action *h)
5691 {
5692 	int this_cpu = smp_processor_id();
5693 	struct rq *this_rq = cpu_rq(this_cpu);
5694 	enum cpu_idle_type idle = this_rq->idle_balance ?
5695 						CPU_IDLE : CPU_NOT_IDLE;
5696 
5697 	rebalance_domains(this_cpu, idle);
5698 
5699 	/*
5700 	 * If this cpu has a pending nohz_balance_kick, then do the
5701 	 * balancing on behalf of the other idle cpus whose ticks are
5702 	 * stopped.
5703 	 */
5704 	nohz_idle_balance(this_cpu, idle);
5705 }
5706 
5707 static inline int on_null_domain(int cpu)
5708 {
5709 	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
5710 }
5711 
5712 /*
5713  * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
5714  */
5715 void trigger_load_balance(struct rq *rq, int cpu)
5716 {
5717 	/* Don't need to rebalance while attached to NULL domain */
5718 	if (time_after_eq(jiffies, rq->next_balance) &&
5719 	    likely(!on_null_domain(cpu)))
5720 		raise_softirq(SCHED_SOFTIRQ);
5721 #ifdef CONFIG_NO_HZ_COMMON
5722 	if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
5723 		nohz_balancer_kick(cpu);
5724 #endif
5725 }
5726 
5727 static void rq_online_fair(struct rq *rq)
5728 {
5729 	update_sysctl();
5730 }
5731 
5732 static void rq_offline_fair(struct rq *rq)
5733 {
5734 	update_sysctl();
5735 
5736 	/* Ensure any throttled groups are reachable by pick_next_task */
5737 	unthrottle_offline_cfs_rqs(rq);
5738 }
5739 
5740 #endif /* CONFIG_SMP */
5741 
5742 /*
5743  * scheduler tick hitting a task of our scheduling class:
5744  */
5745 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
5746 {
5747 	struct cfs_rq *cfs_rq;
5748 	struct sched_entity *se = &curr->se;
5749 
5750 	for_each_sched_entity(se) {
5751 		cfs_rq = cfs_rq_of(se);
5752 		entity_tick(cfs_rq, se, queued);
5753 	}
5754 
5755 	if (sched_feat_numa(NUMA))
5756 		task_tick_numa(rq, curr);
5757 
5758 	update_rq_runnable_avg(rq, 1);
5759 }
5760 
5761 /*
5762  * called on fork with the child task as argument from the parent's context
5763  *  - child not yet on the tasklist
5764  *  - preemption disabled
5765  */
5766 static void task_fork_fair(struct task_struct *p)
5767 {
5768 	struct cfs_rq *cfs_rq;
5769 	struct sched_entity *se = &p->se, *curr;
5770 	int this_cpu = smp_processor_id();
5771 	struct rq *rq = this_rq();
5772 	unsigned long flags;
5773 
5774 	raw_spin_lock_irqsave(&rq->lock, flags);
5775 
5776 	update_rq_clock(rq);
5777 
5778 	cfs_rq = task_cfs_rq(current);
5779 	curr = cfs_rq->curr;
5780 
5781 	if (unlikely(task_cpu(p) != this_cpu)) {
5782 		rcu_read_lock();
5783 		__set_task_cpu(p, this_cpu);
5784 		rcu_read_unlock();
5785 	}
5786 
5787 	update_curr(cfs_rq);
5788 
5789 	if (curr)
5790 		se->vruntime = curr->vruntime;
5791 	place_entity(cfs_rq, se, 1);
5792 
5793 	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
5794 		/*
5795 		 * Upon rescheduling, sched_class::put_prev_task() will place
5796 		 * 'current' within the tree based on its new key value.
5797 		 */
5798 		swap(curr->vruntime, se->vruntime);
5799 		resched_task(rq->curr);
5800 	}
5801 
5802 	se->vruntime -= cfs_rq->min_vruntime;
5803 
5804 	raw_spin_unlock_irqrestore(&rq->lock, flags);
5805 }
5806 
5807 /*
5808  * Priority of the task has changed. Check to see if we preempt
5809  * the current task.
5810  */
5811 static void
5812 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
5813 {
5814 	if (!p->se.on_rq)
5815 		return;
5816 
5817 	/*
5818 	 * Reschedule if we are currently running on this runqueue and
5819 	 * our priority decreased, or if we are not currently running on
5820 	 * this runqueue and our priority is higher than the current's
5821 	 */
5822 	if (rq->curr == p) {
5823 		if (p->prio > oldprio)
5824 			resched_task(rq->curr);
5825 	} else
5826 		check_preempt_curr(rq, p, 0);
5827 }
5828 
5829 static void switched_from_fair(struct rq *rq, struct task_struct *p)
5830 {
5831 	struct sched_entity *se = &p->se;
5832 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
5833 
5834 	/*
5835 	 * Ensure the task's vruntime is normalized, so that when it's
5836 	 * switched back to the fair class the enqueue_entity(.flags=0) will
5837 	 * do the right thing.
5838 	 *
5839 	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
5840 	 * have normalized the vruntime; if it was !on_rq, then only when
5841 	 * the task is sleeping will it still have non-normalized vruntime.
5842 	 */
5843 	if (!se->on_rq && p->state != TASK_RUNNING) {
5844 		/*
5845 		 * Fix up our vruntime so that the current sleep doesn't
5846 		 * cause 'unlimited' sleep bonus.
5847 		 */
5848 		place_entity(cfs_rq, se, 0);
5849 		se->vruntime -= cfs_rq->min_vruntime;
5850 	}
5851 
5852 #if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
5853 	/*
5854 	 * Remove our load from contribution when we leave sched_fair
5855 	 * and ensure we don't carry in an old decay_count if we
5856 	 * switch back.
5857 	 */
5858 	if (p->se.avg.decay_count) {
5859 		struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
5860 		__synchronize_entity_decay(&p->se);
5861 		subtract_blocked_load_contrib(cfs_rq,
5862 				p->se.avg.load_avg_contrib);
5863 	}
5864 #endif
5865 }
5866 
5867 /*
5868  * We switched to the sched_fair class.
5869  */
5870 static void switched_to_fair(struct rq *rq, struct task_struct *p)
5871 {
5872 	if (!p->se.on_rq)
5873 		return;
5874 
5875 	/*
5876 	 * We were most likely switched from sched_rt, so
5877 	 * kick off the schedule if running, otherwise just see
5878 	 * if we can still preempt the current task.
5879 	 */
5880 	if (rq->curr == p)
5881 		resched_task(rq->curr);
5882 	else
5883 		check_preempt_curr(rq, p, 0);
5884 }
5885 
5886 /* Account for a task changing its policy or group.
5887  *
5888  * This routine is mostly called to set cfs_rq->curr field when a task
5889  * migrates between groups/classes.
5890  */
5891 static void set_curr_task_fair(struct rq *rq)
5892 {
5893 	struct sched_entity *se = &rq->curr->se;
5894 
5895 	for_each_sched_entity(se) {
5896 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
5897 
5898 		set_next_entity(cfs_rq, se);
5899 		/* ensure bandwidth has been allocated on our new cfs_rq */
5900 		account_cfs_rq_runtime(cfs_rq, 0);
5901 	}
5902 }
5903 
5904 void init_cfs_rq(struct cfs_rq *cfs_rq)
5905 {
5906 	cfs_rq->tasks_timeline = RB_ROOT;
5907 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
5908 #ifndef CONFIG_64BIT
5909 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
5910 #endif
5911 #if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
5912 	atomic64_set(&cfs_rq->decay_counter, 1);
5913 	atomic64_set(&cfs_rq->removed_load, 0);
5914 #endif
5915 }
5916 
5917 #ifdef CONFIG_FAIR_GROUP_SCHED
5918 static void task_move_group_fair(struct task_struct *p, int on_rq)
5919 {
5920 	struct cfs_rq *cfs_rq;
5921 	/*
5922 	 * If the task was not on the rq at the time of this cgroup movement
5923 	 * it must have been asleep; sleeping tasks keep their ->vruntime
5924 	 * absolute on their old rq until wakeup (needed for the fair sleeper
5925 	 * bonus in place_entity()).
5926 	 *
5927 	 * If it was on the rq, we've just 'preempted' it, which does convert
5928 	 * ->vruntime to a relative base.
5929 	 *
5930 	 * Make sure both cases convert their relative position when migrating
5931 	 * to another cgroup's rq. This does somewhat interfere with the
5932 	 * fair sleeper stuff for the first placement, but who cares.
5933 	 */
5934 	/*
5935 	 * When !on_rq, vruntime of the task has usually NOT been normalized.
5936 	 * But there are some cases where it has already been normalized:
5937 	 *
5938 	 * - Moving a forked child which is waiting for being woken up by
5939 	 *   wake_up_new_task().
5940 	 * - Moving a task which has been woken up by try_to_wake_up() and
5941 	 *   waiting for actually being woken up by sched_ttwu_pending().
5942 	 *
5943 	 * To prevent boost or penalty in the new cfs_rq caused by delta
5944 	 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
5945 	 */
5946 	if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
5947 		on_rq = 1;
5948 
5949 	if (!on_rq)
5950 		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
5951 	set_task_rq(p, task_cpu(p));
5952 	if (!on_rq) {
5953 		cfs_rq = cfs_rq_of(&p->se);
5954 		p->se.vruntime += cfs_rq->min_vruntime;
5955 #ifdef CONFIG_SMP
5956 		/*
5957 		 * migrate_task_rq_fair() will have removed our previous
5958 		 * contribution, but we must synchronize for ongoing future
5959 		 * decay.
5960 		 */
5961 		p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
5962 		cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
5963 #endif
5964 	}
5965 }
5966 
5967 void free_fair_sched_group(struct task_group *tg)
5968 {
5969 	int i;
5970 
5971 	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
5972 
5973 	for_each_possible_cpu(i) {
5974 		if (tg->cfs_rq)
5975 			kfree(tg->cfs_rq[i]);
5976 		if (tg->se)
5977 			kfree(tg->se[i]);
5978 	}
5979 
5980 	kfree(tg->cfs_rq);
5981 	kfree(tg->se);
5982 }
5983 
5984 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
5985 {
5986 	struct cfs_rq *cfs_rq;
5987 	struct sched_entity *se;
5988 	int i;
5989 
5990 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
5991 	if (!tg->cfs_rq)
5992 		goto err;
5993 	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
5994 	if (!tg->se)
5995 		goto err;
5996 
5997 	tg->shares = NICE_0_LOAD;
5998 
5999 	init_cfs_bandwidth(tg_cfs_bandwidth(tg));
6000 
6001 	for_each_possible_cpu(i) {
6002 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
6003 				      GFP_KERNEL, cpu_to_node(i));
6004 		if (!cfs_rq)
6005 			goto err;
6006 
6007 		se = kzalloc_node(sizeof(struct sched_entity),
6008 				  GFP_KERNEL, cpu_to_node(i));
6009 		if (!se)
6010 			goto err_free_rq;
6011 
6012 		init_cfs_rq(cfs_rq);
6013 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
6014 	}
6015 
6016 	return 1;
6017 
6018 err_free_rq:
6019 	kfree(cfs_rq);
6020 err:
6021 	return 0;
6022 }
6023 
6024 void unregister_fair_sched_group(struct task_group *tg, int cpu)
6025 {
6026 	struct rq *rq = cpu_rq(cpu);
6027 	unsigned long flags;
6028 
6029 	/*
6030 	 * Only empty task groups can be destroyed, so we can speculatively
6031 	 * check on_list without danger of it being re-added.
6032 	 */
6033 	if (!tg->cfs_rq[cpu]->on_list)
6034 		return;
6035 
6036 	raw_spin_lock_irqsave(&rq->lock, flags);
6037 	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
6038 	raw_spin_unlock_irqrestore(&rq->lock, flags);
6039 }
6040 
6041 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
6042 			struct sched_entity *se, int cpu,
6043 			struct sched_entity *parent)
6044 {
6045 	struct rq *rq = cpu_rq(cpu);
6046 
6047 	cfs_rq->tg = tg;
6048 	cfs_rq->rq = rq;
6049 	init_cfs_rq_runtime(cfs_rq);
6050 
6051 	tg->cfs_rq[cpu] = cfs_rq;
6052 	tg->se[cpu] = se;
6053 
6054 	/* se could be NULL for root_task_group */
6055 	if (!se)
6056 		return;
6057 
6058 	if (!parent)
6059 		se->cfs_rq = &rq->cfs;
6060 	else
6061 		se->cfs_rq = parent->my_q;
6062 
6063 	se->my_q = cfs_rq;
6064 	update_load_set(&se->load, 0);
6065 	se->parent = parent;
6066 }
6067 
6068 static DEFINE_MUTEX(shares_mutex);
6069 
6070 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
6071 {
6072 	int i;
6073 	unsigned long flags;
6074 
6075 	/*
6076 	 * We can't change the weight of the root cgroup.
6077 	 */
6078 	if (!tg->se[0])
6079 		return -EINVAL;
6080 
6081 	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
6082 
6083 	mutex_lock(&shares_mutex);
6084 	if (tg->shares == shares)
6085 		goto done;
6086 
6087 	tg->shares = shares;
6088 	for_each_possible_cpu(i) {
6089 		struct rq *rq = cpu_rq(i);
6090 		struct sched_entity *se;
6091 
6092 		se = tg->se[i];
6093 		/* Propagate contribution to hierarchy */
6094 		raw_spin_lock_irqsave(&rq->lock, flags);
6095 		for_each_sched_entity(se)
6096 			update_cfs_shares(group_cfs_rq(se));
6097 		raw_spin_unlock_irqrestore(&rq->lock, flags);
6098 	}
6099 
6100 done:
6101 	mutex_unlock(&shares_mutex);
6102 	return 0;
6103 }
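/*
 * Typical caller (call path assumed): writes to a cgroup's cpu.shares file
 * end up here through the cgroup cpu controller, so e.g. setting one group
 * to 2048 while another stays at the default 1024 gives the first group
 * roughly twice the CPU time when both are fully loaded at the same level
 * of the hierarchy.
 */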
6104 #else /* CONFIG_FAIR_GROUP_SCHED */
6105 
6106 void free_fair_sched_group(struct task_group *tg) { }
6107 
6108 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6109 {
6110 	return 1;
6111 }
6112 
6113 void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
6114 
6115 #endif /* CONFIG_FAIR_GROUP_SCHED */
6116 
6117 
6118 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
6119 {
6120 	struct sched_entity *se = &task->se;
6121 	unsigned int rr_interval = 0;
6122 
6123 	/*
6124 	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
6125 	 * idle runqueue:
6126 	 */
6127 	if (rq->cfs.load.weight)
6128 		rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
6129 
6130 	return rr_interval;
6131 }
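/*
 * Illustrative value (defaults assumed): with two equally weighted tasks on
 * the runqueue and a targeted latency of roughly 6ms, sched_slice() comes to
 * about 3ms, so sched_rr_get_interval() on a SCHED_OTHER task reports roughly
 * that much rounded to jiffies; on an otherwise idle runqueue it reports 0.
 */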
6132 
6133 /*
6134  * All the scheduling class methods:
6135  */
6136 const struct sched_class fair_sched_class = {
6137 	.next			= &idle_sched_class,
6138 	.enqueue_task		= enqueue_task_fair,
6139 	.dequeue_task		= dequeue_task_fair,
6140 	.yield_task		= yield_task_fair,
6141 	.yield_to_task		= yield_to_task_fair,
6142 
6143 	.check_preempt_curr	= check_preempt_wakeup,
6144 
6145 	.pick_next_task		= pick_next_task_fair,
6146 	.put_prev_task		= put_prev_task_fair,
6147 
6148 #ifdef CONFIG_SMP
6149 	.select_task_rq		= select_task_rq_fair,
6150 #ifdef CONFIG_FAIR_GROUP_SCHED
6151 	.migrate_task_rq	= migrate_task_rq_fair,
6152 #endif
6153 	.rq_online		= rq_online_fair,
6154 	.rq_offline		= rq_offline_fair,
6155 
6156 	.task_waking		= task_waking_fair,
6157 #endif
6158 
6159 	.set_curr_task          = set_curr_task_fair,
6160 	.task_tick		= task_tick_fair,
6161 	.task_fork		= task_fork_fair,
6162 
6163 	.prio_changed		= prio_changed_fair,
6164 	.switched_from		= switched_from_fair,
6165 	.switched_to		= switched_to_fair,
6166 
6167 	.get_rr_interval	= get_rr_interval_fair,
6168 
6169 #ifdef CONFIG_FAIR_GROUP_SCHED
6170 	.task_move_group	= task_move_group_fair,
6171 #endif
6172 };
6173 
6174 #ifdef CONFIG_SCHED_DEBUG
6175 void print_cfs_stats(struct seq_file *m, int cpu)
6176 {
6177 	struct cfs_rq *cfs_rq;
6178 
6179 	rcu_read_lock();
6180 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
6181 		print_cfs_rq(m, cpu, cfs_rq);
6182 	rcu_read_unlock();
6183 }
6184 #endif
6185 
6186 __init void init_sched_fair_class(void)
6187 {
6188 #ifdef CONFIG_SMP
6189 	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
6190 
6191 #ifdef CONFIG_NO_HZ_COMMON
6192 	nohz.next_balance = jiffies;
6193 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
6194 	cpu_notifier(sched_ilb_notifier, 0);
6195 #endif
6196 #endif /* SMP */
6197 
6198 }
6199