1 
2 #include <linux/sched.h>
3 #include <linux/sched/sysctl.h>
4 #include <linux/sched/rt.h>
5 #include <linux/u64_stats_sync.h>
6 #include <linux/sched/deadline.h>
7 #include <linux/binfmts.h>
8 #include <linux/mutex.h>
9 #include <linux/spinlock.h>
10 #include <linux/stop_machine.h>
11 #include <linux/irq_work.h>
12 #include <linux/tick.h>
13 #include <linux/slab.h>
14 
15 #include "cpupri.h"
16 #include "cpudeadline.h"
17 #include "cpuacct.h"
18 
19 #ifdef CONFIG_SCHED_DEBUG
20 #define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
21 #else
22 #define SCHED_WARN_ON(x)	((void)(x))
23 #endif
24 
25 struct rq;
26 struct cpuidle_state;
27 
28 /* task_struct::on_rq states: */
29 #define TASK_ON_RQ_QUEUED	1
30 #define TASK_ON_RQ_MIGRATING	2
31 
32 extern __read_mostly int scheduler_running;
33 
34 extern unsigned long calc_load_update;
35 extern atomic_long_t calc_load_tasks;
36 
37 extern void calc_global_load_tick(struct rq *this_rq);
38 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
39 
40 #ifdef CONFIG_SMP
41 extern void cpu_load_update_active(struct rq *this_rq);
42 extern void check_for_migration(struct rq *rq, struct task_struct *p);
43 #else
44 static inline void cpu_load_update_active(struct rq *this_rq) { }
45 static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
46 #endif
47 
48 #ifdef CONFIG_SCHED_SMT
49 extern void update_idle_core(struct rq *rq);
50 #else
51 static inline void update_idle_core(struct rq *rq) { }
52 #endif
53 
54 /*
55  * Helpers for converting nanosecond timing to jiffy resolution
56  */
57 #define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
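
/*
 * Worked example (illustrative): with HZ == 1000 the divisor
 * NSEC_PER_SEC / HZ is 1,000,000, so NS_TO_JIFFIES(5000000), i.e. 5ms,
 * evaluates to 5 jiffies.
 */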
58 
59 /*
60  * Increase resolution of nice-level calculations for 64-bit architectures.
61  * The extra resolution improves shares distribution and load balancing of
62  * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
63  * hierarchies, especially on larger systems. This is not a user-visible change
64  * and does not change the user-interface for setting shares/weights.
65  *
66  * We increase resolution only if we have enough bits to allow this increased
67  * resolution (i.e. 64bit). The costs for increasing resolution when 32bit are
68  * pretty high and the returns do not justify the increased costs.
69  *
70  * Really only required when CONFIG_FAIR_GROUP_SCHED is also set, but to
71  * increase coverage and consistency always enable it on 64bit platforms.
72  */
73 #ifdef CONFIG_64BIT
74 # define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
75 # define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
76 # define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
77 #else
78 # define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
79 # define scale_load(w)		(w)
80 # define scale_load_down(w)	(w)
81 #endif
82 
83 /*
84  * Task weight (visible to users) and its load (invisible to users) have
85  * independent resolution, but they should be well calibrated. We use
86  * scale_load() and scale_load_down(w) to convert between them. The
87  * following must be true:
88  *
89  *  scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
90  *
91  */
92 #define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
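
/*
 * Illustration: with SCHED_FIXEDPOINT_SHIFT == 10, a 64-bit kernel has
 * NICE_0_LOAD_SHIFT == 20 and NICE_0_LOAD == 1 << 20 == 1048576, while
 * scale_load(1024) == 1024 << 10 == 1048576, so the invariant above holds
 * for the nice-0 weight of 1024.
 */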
93 
94 /*
95  * Single value that decides SCHED_DEADLINE internal math precision.
96  * 10 -> just above 1us
97  * 9  -> just above 0.5us
98  */
99 #define DL_SCALE (10)
100 
101 /*
102  * These are the 'tuning knobs' of the scheduler:
103  */
104 
105 /*
106  * Single value that denotes runtime == period, i.e. unlimited time.
107  */
108 #define RUNTIME_INF	((u64)~0ULL)
109 
110 static inline int idle_policy(int policy)
111 {
112 	return policy == SCHED_IDLE;
113 }
114 static inline int fair_policy(int policy)
115 {
116 	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
117 }
118 
119 static inline int rt_policy(int policy)
120 {
121 	return policy == SCHED_FIFO || policy == SCHED_RR;
122 }
123 
124 static inline int dl_policy(int policy)
125 {
126 	return policy == SCHED_DEADLINE;
127 }
128 static inline bool valid_policy(int policy)
129 {
130 	return idle_policy(policy) || fair_policy(policy) ||
131 		rt_policy(policy) || dl_policy(policy);
132 }
133 
134 static inline int task_has_rt_policy(struct task_struct *p)
135 {
136 	return rt_policy(p->policy);
137 }
138 
139 static inline int task_has_dl_policy(struct task_struct *p)
140 {
141 	return dl_policy(p->policy);
142 }
143 
144 /*
145  * Tells if entity @a should preempt entity @b.
146  */
147 static inline bool
148 dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
149 {
150 	return dl_time_before(a->deadline, b->deadline);
151 }
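
/*
 * Example (EDF ordering): if @a's absolute deadline is 100us from now and
 * @b's is 200us from now, dl_entity_preempt(a, b) is true and @a runs first.
 */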
152 
153 /*
154  * This is the priority-queue data structure of the RT scheduling class:
155  */
156 struct rt_prio_array {
157 	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
158 	struct list_head queue[MAX_RT_PRIO];
159 };
160 
161 struct rt_bandwidth {
162 	/* nests inside the rq lock: */
163 	raw_spinlock_t		rt_runtime_lock;
164 	ktime_t			rt_period;
165 	u64			rt_runtime;
166 	struct hrtimer		rt_period_timer;
167 	unsigned int		rt_period_active;
168 };
169 
170 void __dl_clear_params(struct task_struct *p);
171 
172 /*
173  * To keep the bandwidth of -deadline tasks and groups under control
174  * we need some place where:
175  *  - store the maximum -deadline bandwidth of the system (the group);
176  *  - cache the fraction of that bandwidth that is currently allocated.
177  *
178  * This is all done in the data structure below. It is similar to the
179  * one used for RT-throttling (rt_bandwidth), with the main difference
180  * that, since here we are only interested in admission control, we
181  * do not decrease any runtime while the group "executes", nor do we
182  * need a timer to replenish it.
183  *
184  * With respect to SMP, the bandwidth is given on a per-CPU basis,
185  * meaning that:
186  *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
187  *  - dl_total_bw array contains, in the i-th element, the currently
188  *    allocated bandwidth on the i-th CPU.
189  * Moreover, groups consume bandwidth on each CPU, while tasks only
190  * consume bandwidth on the CPU they're running on.
191  * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
192  * that will be shown the next time the proc or cgroup controls are
193  * read. It can, in turn, be changed by writing to its own
194  * control.
195  */
196 struct dl_bandwidth {
197 	raw_spinlock_t dl_runtime_lock;
198 	u64 dl_runtime;
199 	u64 dl_period;
200 };
201 
202 static inline int dl_bandwidth_enabled(void)
203 {
204 	return sysctl_sched_rt_runtime >= 0;
205 }
206 
207 extern struct dl_bw *dl_bw_of(int i);
208 
209 struct dl_bw {
210 	raw_spinlock_t lock;
211 	u64 bw, total_bw;
212 };
213 
214 static inline
215 void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
216 {
217 	dl_b->total_bw -= tsk_bw;
218 }
219 
220 static inline
221 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
222 {
223 	dl_b->total_bw += tsk_bw;
224 }
225 
226 static inline
227 bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
228 {
229 	return dl_b->bw != -1 &&
230 	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
231 }
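
/*
 * Worked example: with dl_b->bw set to 95% per CPU and cpus == 4, the budget
 * is 3.8 CPUs worth of bandwidth. A request that would move total_bw from
 * 3.5 to 4.0 makes bw * cpus (3.8) < 4.0, so __dl_overflow() returns true
 * and the change is rejected; bw == -1 disables the check entirely.
 */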
232 
233 extern struct mutex sched_domains_mutex;
234 
235 #ifdef CONFIG_CGROUP_SCHED
236 
237 #include <linux/cgroup.h>
238 
239 struct cfs_rq;
240 struct rt_rq;
241 
242 extern struct list_head task_groups;
243 
244 struct cfs_bandwidth {
245 #ifdef CONFIG_CFS_BANDWIDTH
246 	raw_spinlock_t lock;
247 	ktime_t period;
248 	u64 quota, runtime;
249 	s64 hierarchical_quota;
250 	u64 runtime_expires;
251 
252 	int idle, period_active;
253 	struct hrtimer period_timer, slack_timer;
254 	struct list_head throttled_cfs_rq;
255 
256 	/* statistics */
257 	int nr_periods, nr_throttled;
258 	u64 throttled_time;
259 #endif
260 };
261 
262 /* task group related information */
263 struct task_group {
264 	struct cgroup_subsys_state css;
265 
266 #ifdef CONFIG_FAIR_GROUP_SCHED
267 	/* schedulable entities of this group on each cpu */
268 	struct sched_entity **se;
269 	/* runqueue "owned" by this group on each cpu */
270 	struct cfs_rq **cfs_rq;
271 	unsigned long shares;
272 
273 #ifdef	CONFIG_SMP
274 	/*
275 	 * load_avg can be heavily contended at clock tick time, so put
276 	 * it in its own cacheline separated from the fields above which
277 	 * will also be accessed at each tick.
278 	 */
279 	atomic_long_t load_avg ____cacheline_aligned;
280 #endif
281 #endif
282 
283 #ifdef CONFIG_RT_GROUP_SCHED
284 	struct sched_rt_entity **rt_se;
285 	struct rt_rq **rt_rq;
286 
287 	struct rt_bandwidth rt_bandwidth;
288 #endif
289 
290 	struct rcu_head rcu;
291 	struct list_head list;
292 
293 	struct task_group *parent;
294 	struct list_head siblings;
295 	struct list_head children;
296 
297 #ifdef CONFIG_SCHED_AUTOGROUP
298 	struct autogroup *autogroup;
299 #endif
300 
301 	struct cfs_bandwidth cfs_bandwidth;
302 };
303 
304 #ifdef CONFIG_FAIR_GROUP_SCHED
305 #define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
306 
307 /*
308  * A weight of 0 or 1 can cause arithmetic problems.
309  * The weight of a cfs_rq is the sum of the weights of the entities
310  * queued on that cfs_rq, so the weight of an entity should not be
311  * too large; the same goes for the shares value of a task group.
312  * (The default weight is 1024 - so there's no practical
313  *  limitation from this.)
314  */
315 #define MIN_SHARES	(1UL <<  1)
316 #define MAX_SHARES	(1UL << 18)
317 #endif
318 
319 typedef int (*tg_visitor)(struct task_group *, void *);
320 
321 extern int walk_tg_tree_from(struct task_group *from,
322 			     tg_visitor down, tg_visitor up, void *data);
323 
324 /*
325  * Iterate the full tree, calling @down when first entering a node and @up when
326  * leaving it for the final time.
327  *
328  * Caller must hold rcu_lock or sufficient equivalent.
329  */
330 static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
331 {
332 	return walk_tg_tree_from(&root_task_group, down, up, data);
333 }
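
/*
 * Usage sketch: a caller that only needs the top-down pass can use tg_nop
 * for the other direction (my_down_fn is a hypothetical tg_visitor that
 * returns 0 to keep walking):
 *
 *	rcu_read_lock();
 *	walk_tg_tree(my_down_fn, tg_nop, &data);
 *	rcu_read_unlock();
 */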
334 
335 extern int tg_nop(struct task_group *tg, void *data);
336 
337 extern void free_fair_sched_group(struct task_group *tg);
338 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
339 extern void online_fair_sched_group(struct task_group *tg);
340 extern void unregister_fair_sched_group(struct task_group *tg);
341 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
342 			struct sched_entity *se, int cpu,
343 			struct sched_entity *parent);
344 extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
345 
346 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
347 extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
348 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
349 
350 extern void free_rt_sched_group(struct task_group *tg);
351 extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
352 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
353 		struct sched_rt_entity *rt_se, int cpu,
354 		struct sched_rt_entity *parent);
355 
356 extern struct task_group *sched_create_group(struct task_group *parent);
357 extern void sched_online_group(struct task_group *tg,
358 			       struct task_group *parent);
359 extern void sched_destroy_group(struct task_group *tg);
360 extern void sched_offline_group(struct task_group *tg);
361 
362 extern void sched_move_task(struct task_struct *tsk);
363 
364 #ifdef CONFIG_FAIR_GROUP_SCHED
365 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
366 
367 #ifdef CONFIG_SMP
368 extern void set_task_rq_fair(struct sched_entity *se,
369 			     struct cfs_rq *prev, struct cfs_rq *next);
370 #else /* !CONFIG_SMP */
371 static inline void set_task_rq_fair(struct sched_entity *se,
372 			     struct cfs_rq *prev, struct cfs_rq *next) { }
373 #endif /* CONFIG_SMP */
374 #endif /* CONFIG_FAIR_GROUP_SCHED */
375 
376 #else /* CONFIG_CGROUP_SCHED */
377 
378 struct cfs_bandwidth { };
379 
380 #endif	/* CONFIG_CGROUP_SCHED */
381 
382 /* CFS-related fields in a runqueue */
383 struct cfs_rq {
384 	struct load_weight load;
385 	unsigned int nr_running, h_nr_running;
386 
387 	u64 exec_clock;
388 	u64 min_vruntime;
389 #ifndef CONFIG_64BIT
390 	u64 min_vruntime_copy;
391 #endif
392 
393 	struct rb_root tasks_timeline;
394 	struct rb_node *rb_leftmost;
395 
396 	/*
397 	 * 'curr' points to currently running entity on this cfs_rq.
398 	 * It is set to NULL otherwise (i.e. when none are currently running).
399 	 */
400 	struct sched_entity *curr, *next, *last, *skip;
401 
402 #ifdef	CONFIG_SCHED_DEBUG
403 	unsigned int nr_spread_over;
404 #endif
405 
406 #ifdef CONFIG_SMP
407 	/*
408 	 * CFS load tracking
409 	 */
410 	struct sched_avg avg;
411 	u64 runnable_load_sum;
412 	unsigned long runnable_load_avg;
413 #ifdef CONFIG_FAIR_GROUP_SCHED
414 	unsigned long tg_load_avg_contrib;
415 	unsigned long propagate_avg;
416 #endif
417 	atomic_long_t removed_load_avg, removed_util_avg;
418 #ifndef CONFIG_64BIT
419 	u64 load_last_update_time_copy;
420 #endif
421 
422 #ifdef CONFIG_FAIR_GROUP_SCHED
423 	/*
424 	 *   h_load = weight * f(tg)
425 	 *
426 	 * Where f(tg) is the recursive weight fraction assigned to
427 	 * this group.
428 	 */
429 	unsigned long h_load;
430 	u64 last_h_load_update;
431 	struct sched_entity *h_load_next;
432 #endif /* CONFIG_FAIR_GROUP_SCHED */
433 #endif /* CONFIG_SMP */
434 
435 #ifdef CONFIG_FAIR_GROUP_SCHED
436 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
437 
438 	/*
439 	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
440 	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
441 	 * (like users, containers etc.)
442 	 *
443 	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
444 	 * list is used during load balance.
445 	 */
446 	int on_list;
447 	struct list_head leaf_cfs_rq_list;
448 	struct task_group *tg;	/* group that "owns" this runqueue */
449 
450 #ifdef CONFIG_SCHED_WALT
451 	u64 cumulative_runnable_avg;
452 #endif
453 
454 #ifdef CONFIG_CFS_BANDWIDTH
455 	int runtime_enabled;
456 	u64 runtime_expires;
457 	s64 runtime_remaining;
458 
459 	u64 throttled_clock, throttled_clock_task;
460 	u64 throttled_clock_task_time;
461 	int throttled, throttle_count;
462 	struct list_head throttled_list;
463 #endif /* CONFIG_CFS_BANDWIDTH */
464 #endif /* CONFIG_FAIR_GROUP_SCHED */
465 };
466 
467 static inline int rt_bandwidth_enabled(void)
468 {
469 	return sysctl_sched_rt_runtime >= 0;
470 }
471 
472 /* RT IPI pull logic requires IRQ_WORK */
473 #if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
474 # define HAVE_RT_PUSH_IPI
475 #endif
476 
477 /* Real-Time classes' related field in a runqueue: */
478 struct rt_rq {
479 	struct rt_prio_array active;
480 	unsigned int rt_nr_running;
481 	unsigned int rr_nr_running;
482 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
483 	struct {
484 		int curr; /* highest queued rt task prio */
485 #ifdef CONFIG_SMP
486 		int next; /* next highest */
487 #endif
488 	} highest_prio;
489 #endif
490 #ifdef CONFIG_SMP
491 	unsigned long rt_nr_migratory;
492 	unsigned long rt_nr_total;
493 	int overloaded;
494 	struct plist_head pushable_tasks;
495 #endif /* CONFIG_SMP */
496 	int rt_queued;
497 
498 	int rt_throttled;
499 	u64 rt_time;
500 	u64 rt_runtime;
501 	/* Nests inside the rq lock: */
502 	raw_spinlock_t rt_runtime_lock;
503 
504 #ifdef CONFIG_RT_GROUP_SCHED
505 	unsigned long rt_nr_boosted;
506 
507 	struct rq *rq;
508 	struct task_group *tg;
509 #endif
510 };
511 
512 /* Deadline class' related fields in a runqueue */
513 struct dl_rq {
514 	/* runqueue is an rbtree, ordered by deadline */
515 	struct rb_root rb_root;
516 	struct rb_node *rb_leftmost;
517 
518 	unsigned long dl_nr_running;
519 
520 #ifdef CONFIG_SMP
521 	/*
522 	 * Deadline values of the currently executing and the
523 	 * earliest ready task on this rq. Caching these facilitates
524 	 * the decision whether or not a ready but not running task
525 	 * should migrate somewhere else.
526 	 */
527 	struct {
528 		u64 curr;
529 		u64 next;
530 	} earliest_dl;
531 
532 	unsigned long dl_nr_migratory;
533 	int overloaded;
534 
535 	/*
536 	 * Tasks on this rq that can be pushed away. They are kept in
537 	 * an rb-tree, ordered by tasks' deadlines, with caching
538 	 * of the leftmost (earliest deadline) element.
539 	 */
540 	struct rb_root pushable_dl_tasks_root;
541 	struct rb_node *pushable_dl_tasks_leftmost;
542 #else
543 	struct dl_bw dl_bw;
544 #endif
545 };
546 
547 #ifdef CONFIG_SMP
548 
549 struct max_cpu_capacity {
550 	raw_spinlock_t lock;
551 	unsigned long val;
552 	int cpu;
553 };
554 
555 /*
556  * We add the notion of a root-domain which will be used to define per-domain
557  * variables. Each exclusive cpuset essentially defines an island domain by
558  * fully partitioning the member cpus from any other cpuset. Whenever a new
559  * exclusive cpuset is created, we also create and attach a new root-domain
560  * object.
561  *
562  */
563 struct root_domain {
564 	atomic_t refcount;
565 	atomic_t rto_count;
566 	struct rcu_head rcu;
567 	cpumask_var_t span;
568 	cpumask_var_t online;
569 
570 	/* Indicate more than one runnable task for any CPU */
571 	bool overload;
572 
573 	/* Indicate one or more cpus over-utilized (tipping point) */
574 	bool overutilized;
575 
576 	/*
577 	 * The bit corresponding to a CPU gets set here if such CPU has more
578 	 * than one runnable -deadline task (as it is below for RT tasks).
579 	 */
580 	cpumask_var_t dlo_mask;
581 	atomic_t dlo_count;
582 	struct dl_bw dl_bw;
583 	struct cpudl cpudl;
584 
585 #ifdef HAVE_RT_PUSH_IPI
586 	/*
587 	 * For IPI pull requests, loop across the rto_mask.
588 	 */
589 	struct irq_work rto_push_work;
590 	raw_spinlock_t rto_lock;
591 	/* These are only updated and read within rto_lock */
592 	int rto_loop;
593 	int rto_cpu;
594 	/* These atomics are updated outside of a lock */
595 	atomic_t rto_loop_next;
596 	atomic_t rto_loop_start;
597 #endif
598 	/*
599 	 * The "RT overload" flag: it gets set if a CPU has more than
600 	 * one runnable RT task.
601 	 */
602 	cpumask_var_t rto_mask;
603 	struct cpupri cpupri;
604 
605 	/* Maximum cpu capacity in the system. */
606 	struct max_cpu_capacity max_cpu_capacity;
607 
608 	/* First cpu with maximum and minimum original capacity */
609 	int max_cap_orig_cpu, min_cap_orig_cpu;
610 };
611 
612 extern struct root_domain def_root_domain;
613 extern void sched_get_rd(struct root_domain *rd);
614 extern void sched_put_rd(struct root_domain *rd);
615 
616 #ifdef HAVE_RT_PUSH_IPI
617 extern void rto_push_irq_work_func(struct irq_work *work);
618 #endif
619 #endif /* CONFIG_SMP */
620 
621 /*
622  * This is the main, per-CPU runqueue data structure.
623  *
624  * Locking rule: code that needs to lock multiple runqueues
625  * (such as the load balancing or the thread migration code) must
626  * acquire the locks in ascending runqueue-address order.
627  */
628 struct rq {
629 	/* runqueue lock: */
630 	raw_spinlock_t lock;
631 
632 	/*
633 	 * nr_running and cpu_load should be in the same cacheline because
634 	 * remote CPUs use both these fields when doing load calculation.
635 	 */
636 	unsigned int nr_running;
637 #ifdef CONFIG_NUMA_BALANCING
638 	unsigned int nr_numa_running;
639 	unsigned int nr_preferred_running;
640 #endif
641 	#define CPU_LOAD_IDX_MAX 5
642 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
643 	unsigned int misfit_task;
644 #ifdef CONFIG_NO_HZ_COMMON
645 #ifdef CONFIG_SMP
646 	unsigned long last_load_update_tick;
647 #endif /* CONFIG_SMP */
648 	unsigned long nohz_flags;
649 #endif /* CONFIG_NO_HZ_COMMON */
650 #ifdef CONFIG_NO_HZ_FULL
651 	unsigned long last_sched_tick;
652 #endif
653 
654 #ifdef CONFIG_CPU_QUIET
655 	/* time-based average load */
656 	u64 nr_last_stamp;
657 	u64 nr_running_integral;
658 	seqcount_t ave_seqcnt;
659 #endif
660 
661 	/* capture load from *all* tasks on this cpu: */
662 	struct load_weight load;
663 	unsigned long nr_load_updates;
664 	u64 nr_switches;
665 
666 	struct cfs_rq cfs;
667 	struct rt_rq rt;
668 	struct dl_rq dl;
669 
670 #ifdef CONFIG_FAIR_GROUP_SCHED
671 	/* list of leaf cfs_rq on this cpu: */
672 	struct list_head leaf_cfs_rq_list;
673 	struct list_head *tmp_alone_branch;
674 #endif /* CONFIG_FAIR_GROUP_SCHED */
675 
676 	/*
677 	 * This is part of a global counter where only the total sum
678 	 * over all CPUs matters. A task can increase this counter on
679 	 * one CPU and if it got migrated afterwards it may decrease
680 	 * it on another CPU. Always updated under the runqueue lock:
681 	 */
682 	unsigned long nr_uninterruptible;
683 
684 	struct task_struct *curr, *idle, *stop;
685 	unsigned long next_balance;
686 	struct mm_struct *prev_mm;
687 
688 	unsigned int clock_skip_update;
689 	u64 clock;
690 	u64 clock_task;
691 
692 	atomic_t nr_iowait;
693 
694 #ifdef CONFIG_SMP
695 	struct root_domain *rd;
696 	struct sched_domain *sd;
697 
698 	unsigned long cpu_capacity;
699 	unsigned long cpu_capacity_orig;
700 
701 	struct callback_head *balance_callback;
702 
703 	unsigned char idle_balance;
704 	/* For active balancing */
705 	int active_balance;
706 	int push_cpu;
707 	struct task_struct *push_task;
708 	struct cpu_stop_work active_balance_work;
709 	/* cpu of this runqueue: */
710 	int cpu;
711 	int online;
712 
713 	struct list_head cfs_tasks;
714 
715 	u64 rt_avg;
716 	u64 age_stamp;
717 	u64 idle_stamp;
718 	u64 avg_idle;
719 
720 	/* This is used to determine avg_idle's max value */
721 	u64 max_idle_balance_cost;
722 #endif
723 
724 #ifdef CONFIG_SCHED_WALT
725 	u64 cumulative_runnable_avg;
726 	u64 window_start;
727 	u64 curr_runnable_sum;
728 	u64 prev_runnable_sum;
729 	u64 nt_curr_runnable_sum;
730 	u64 nt_prev_runnable_sum;
731 	u64 cur_irqload;
732 	u64 avg_irqload;
733 	u64 irqload_ts;
734 	u64 cum_window_demand;
735 #endif /* CONFIG_SCHED_WALT */
736 
737 
738 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
739 	u64 prev_irq_time;
740 #endif
741 #ifdef CONFIG_PARAVIRT
742 	u64 prev_steal_time;
743 #endif
744 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
745 	u64 prev_steal_time_rq;
746 #endif
747 
748 	/* calc_load related fields */
749 	unsigned long calc_load_update;
750 	long calc_load_active;
751 
752 #ifdef CONFIG_SCHED_HRTICK
753 #ifdef CONFIG_SMP
754 	int hrtick_csd_pending;
755 	struct call_single_data hrtick_csd;
756 #endif
757 	struct hrtimer hrtick_timer;
758 #endif
759 
760 #ifdef CONFIG_SCHEDSTATS
761 	/* latency stats */
762 	struct sched_info rq_sched_info;
763 	unsigned long long rq_cpu_time;
764 	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
765 
766 	/* sys_sched_yield() stats */
767 	unsigned int yld_count;
768 
769 	/* schedule() stats */
770 	unsigned int sched_count;
771 	unsigned int sched_goidle;
772 
773 	/* try_to_wake_up() stats */
774 	unsigned int ttwu_count;
775 	unsigned int ttwu_local;
776 #ifdef CONFIG_SMP
777 	struct eas_stats eas_stats;
778 #endif
779 #endif
780 
781 #ifdef CONFIG_SMP
782 	struct llist_head wake_list;
783 #endif
784 
785 #ifdef CONFIG_CPU_IDLE
786 	/* Must be inspected within an RCU lock section */
787 	struct cpuidle_state *idle_state;
788 	int idle_state_idx;
789 #endif
790 };
791 
792 static inline int cpu_of(struct rq *rq)
793 {
794 #ifdef CONFIG_SMP
795 	return rq->cpu;
796 #else
797 	return 0;
798 #endif
799 }
800 
801 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
802 
803 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
804 #define this_rq()		this_cpu_ptr(&runqueues)
805 #define task_rq(p)		cpu_rq(task_cpu(p))
806 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
807 #define raw_rq()		raw_cpu_ptr(&runqueues)
808 
809 static inline u64 __rq_clock_broken(struct rq *rq)
810 {
811 	return READ_ONCE(rq->clock);
812 }
813 
814 static inline u64 rq_clock(struct rq *rq)
815 {
816 	lockdep_assert_held(&rq->lock);
817 	return rq->clock;
818 }
819 
820 static inline u64 rq_clock_task(struct rq *rq)
821 {
822 	lockdep_assert_held(&rq->lock);
823 	return rq->clock_task;
824 }
825 
826 #define RQCF_REQ_SKIP	0x01
827 #define RQCF_ACT_SKIP	0x02
828 
829 static inline void rq_clock_skip_update(struct rq *rq, bool skip)
830 {
831 	lockdep_assert_held(&rq->lock);
832 	if (skip)
833 		rq->clock_skip_update |= RQCF_REQ_SKIP;
834 	else
835 		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
836 }
837 
838 #ifdef CONFIG_NUMA
839 enum numa_topology_type {
840 	NUMA_DIRECT,
841 	NUMA_GLUELESS_MESH,
842 	NUMA_BACKPLANE,
843 };
844 extern enum numa_topology_type sched_numa_topology_type;
845 extern int sched_max_numa_distance;
846 extern bool find_numa_distance(int distance);
847 #endif
848 
849 #ifdef CONFIG_NUMA_BALANCING
850 /* The regions in numa_faults array from task_struct */
851 enum numa_faults_stats {
852 	NUMA_MEM = 0,
853 	NUMA_CPU,
854 	NUMA_MEMBUF,
855 	NUMA_CPUBUF
856 };
857 extern void sched_setnuma(struct task_struct *p, int node);
858 extern int migrate_task_to(struct task_struct *p, int cpu);
859 extern int migrate_swap(struct task_struct *, struct task_struct *);
860 #endif /* CONFIG_NUMA_BALANCING */
861 
862 #ifdef CONFIG_SMP
863 
864 static inline void
865 queue_balance_callback(struct rq *rq,
866 		       struct callback_head *head,
867 		       void (*func)(struct rq *rq))
868 {
869 	lockdep_assert_held(&rq->lock);
870 
871 	if (unlikely(head->next))
872 		return;
873 
874 	head->func = (void (*)(struct callback_head *))func;
875 	head->next = rq->balance_callback;
876 	rq->balance_callback = head;
877 }
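
/*
 * Usage note: callbacks are queued from under rq->lock and run once the lock
 * is released; head->next doubles as an "already queued" marker, hence the
 * early return above. The RT class, for instance, defers push_rt_tasks()
 * through a per-cpu callback_head this way.
 */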
878 
879 extern void sched_ttwu_pending(void);
880 
881 #define rcu_dereference_check_sched_domain(p) \
882 	rcu_dereference_check((p), \
883 			      lockdep_is_held(&sched_domains_mutex))
884 
885 /*
886  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
887  * See detach_destroy_domains: synchronize_sched for details.
888  *
889  * The domain tree of any CPU may only be accessed from within
890  * preempt-disabled sections.
891  */
892 #define for_each_domain(cpu, __sd) \
893 	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
894 			__sd; __sd = __sd->parent)
895 
896 #define for_each_lower_domain(sd) for (; sd; sd = sd->child)
897 
898 /**
899  * highest_flag_domain - Return highest sched_domain containing flag.
900  * @cpu:	The cpu whose highest level of sched domain is to
901  *		be returned.
902  * @flag:	The flag to check for the highest sched_domain
903  *		for the given cpu.
904  *
905  * Returns the highest sched_domain of a cpu which contains the given flag.
906  */
907 static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
908 {
909 	struct sched_domain *sd, *hsd = NULL;
910 
911 	for_each_domain(cpu, sd) {
912 		if (!(sd->flags & flag))
913 			break;
914 		hsd = sd;
915 	}
916 
917 	return hsd;
918 }
919 
920 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
921 {
922 	struct sched_domain *sd;
923 
924 	for_each_domain(cpu, sd) {
925 		if (sd->flags & flag)
926 			break;
927 	}
928 
929 	return sd;
930 }
931 
932 DECLARE_PER_CPU(struct sched_domain *, sd_llc);
933 DECLARE_PER_CPU(int, sd_llc_size);
934 DECLARE_PER_CPU(int, sd_llc_id);
935 DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
936 DECLARE_PER_CPU(struct sched_domain *, sd_numa);
937 DECLARE_PER_CPU(struct sched_domain *, sd_asym);
938 DECLARE_PER_CPU(struct sched_domain *, sd_ea);
939 DECLARE_PER_CPU(struct sched_domain *, sd_scs);
940 
941 struct sched_group_capacity {
942 	atomic_t ref;
943 	/*
944 	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
945 	 * for a single CPU.
946 	 */
947 	unsigned long capacity;
948 	unsigned long max_capacity; /* Max per-cpu capacity in group */
949 	unsigned long min_capacity; /* Min per-CPU capacity in group */
950 	unsigned long next_update;
951 	int imbalance; /* XXX unrelated to capacity but shared group state */
952 
953 	unsigned long cpumask[0]; /* iteration mask */
954 };
955 
956 struct sched_group {
957 	struct sched_group *next;	/* Must be a circular list */
958 	atomic_t ref;
959 
960 	unsigned int group_weight;
961 	struct sched_group_capacity *sgc;
962 	const struct sched_group_energy *sge;
963 
964 	/*
965 	 * The CPUs this group covers.
966 	 *
967 	 * NOTE: this field is variable length. (Allocated dynamically
968 	 * by attaching extra space to the end of the structure,
969 	 * depending on how many CPUs the kernel has booted up with)
970 	 */
971 	unsigned long cpumask[0];
972 };
973 
974 static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
975 {
976 	return to_cpumask(sg->cpumask);
977 }
978 
979 /*
980  * cpumask masking which cpus in the group are allowed to iterate up the domain
981  * tree.
982  */
983 static inline struct cpumask *sched_group_mask(struct sched_group *sg)
984 {
985 	return to_cpumask(sg->sgc->cpumask);
986 }
987 
988 /**
989  * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
990  * @group: The group whose first cpu is to be returned.
991  */
992 static inline unsigned int group_first_cpu(struct sched_group *group)
993 {
994 	return cpumask_first(sched_group_cpus(group));
995 }
996 
997 extern int group_balance_cpu(struct sched_group *sg);
998 
999 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
1000 void register_sched_domain_sysctl(void);
1001 void unregister_sched_domain_sysctl(void);
1002 #else
1003 static inline void register_sched_domain_sysctl(void)
1004 {
1005 }
1006 static inline void unregister_sched_domain_sysctl(void)
1007 {
1008 }
1009 #endif
1010 
1011 #else
1012 
1013 static inline void sched_ttwu_pending(void) { }
1014 
1015 #endif /* CONFIG_SMP */
1016 
1017 #include "stats.h"
1018 #include "auto_group.h"
1019 
1020 #ifdef CONFIG_CGROUP_SCHED
1021 
1022 /*
1023  * Return the group to which this task belongs.
1024  *
1025  * We cannot use task_css() and friends because the cgroup subsystem
1026  * changes that value before the cgroup_subsys::attach() method is called,
1027  * therefore we cannot pin it and might observe the wrong value.
1028  *
1029  * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
1030  * core changes this before calling sched_move_task().
1031  *
1032  * Instead we use a 'copy' which is updated from sched_move_task() while
1033  * holding both task_struct::pi_lock and rq::lock.
1034  */
1035 static inline struct task_group *task_group(struct task_struct *p)
1036 {
1037 	return p->sched_task_group;
1038 }
1039 
1040 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
1041 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
1042 {
1043 #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
1044 	struct task_group *tg = task_group(p);
1045 #endif
1046 
1047 #ifdef CONFIG_FAIR_GROUP_SCHED
1048 	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
1049 	p->se.cfs_rq = tg->cfs_rq[cpu];
1050 	p->se.parent = tg->se[cpu];
1051 #endif
1052 
1053 #ifdef CONFIG_RT_GROUP_SCHED
1054 	p->rt.rt_rq  = tg->rt_rq[cpu];
1055 	p->rt.parent = tg->rt_se[cpu];
1056 #endif
1057 }
1058 
1059 #else /* CONFIG_CGROUP_SCHED */
1060 
1061 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
1062 static inline struct task_group *task_group(struct task_struct *p)
1063 {
1064 	return NULL;
1065 }
1066 
1067 #endif /* CONFIG_CGROUP_SCHED */
1068 
1069 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1070 {
1071 	set_task_rq(p, cpu);
1072 #ifdef CONFIG_SMP
1073 	/*
1074 	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1075 	 * successfully executed on another CPU. We must ensure that updates of
1076 	 * per-task data have been completed by this moment.
1077 	 */
1078 	smp_wmb();
1079 #ifdef CONFIG_THREAD_INFO_IN_TASK
1080 	p->cpu = cpu;
1081 #else
1082 	task_thread_info(p)->cpu = cpu;
1083 #endif
1084 	p->wake_cpu = cpu;
1085 #endif
1086 }
1087 
1088 /*
1089  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
1090  */
1091 #ifdef CONFIG_SCHED_DEBUG
1092 # include <linux/static_key.h>
1093 # define const_debug __read_mostly
1094 #else
1095 # define const_debug const
1096 #endif
1097 
1098 extern const_debug unsigned int sysctl_sched_features;
1099 
1100 #define SCHED_FEAT(name, enabled)	\
1101 	__SCHED_FEAT_##name ,
1102 
1103 enum {
1104 #include "features.h"
1105 	__SCHED_FEAT_NR,
1106 };
1107 
1108 #undef SCHED_FEAT
1109 
1110 #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
1111 #define SCHED_FEAT(name, enabled)					\
1112 static __always_inline bool static_branch_##name(struct static_key *key) \
1113 {									\
1114 	return static_key_##enabled(key);				\
1115 }
1116 
1117 #include "features.h"
1118 
1119 #undef SCHED_FEAT
1120 
1121 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1122 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
1123 #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
1124 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
1125 #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
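
/*
 * Usage: features declared in features.h are tested with sched_feat(), e.g.
 * hrtick_enabled() below checks sched_feat(HRTICK). With SCHED_DEBUG and
 * jump labels this compiles to a static branch; otherwise it is a plain
 * bitmask test against sysctl_sched_features.
 */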
1126 
1127 extern struct static_key_false sched_numa_balancing;
1128 extern struct static_key_false sched_schedstats;
1129 
1130 static inline u64 global_rt_period(void)
1131 {
1132 	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
1133 }
1134 
1135 static inline u64 global_rt_runtime(void)
1136 {
1137 	if (sysctl_sched_rt_runtime < 0)
1138 		return RUNTIME_INF;
1139 
1140 	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
1141 }
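
/*
 * With the default sysctl values (sched_rt_period_us == 1000000,
 * sched_rt_runtime_us == 950000) this yields a 1s period with 950ms of
 * runtime, i.e. RT tasks may consume at most 95% of each second.
 */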
1142 
1143 static inline int task_current(struct rq *rq, struct task_struct *p)
1144 {
1145 	return rq->curr == p;
1146 }
1147 
1148 static inline int task_running(struct rq *rq, struct task_struct *p)
1149 {
1150 #ifdef CONFIG_SMP
1151 	return p->on_cpu;
1152 #else
1153 	return task_current(rq, p);
1154 #endif
1155 }
1156 
1157 static inline int task_on_rq_queued(struct task_struct *p)
1158 {
1159 	return p->on_rq == TASK_ON_RQ_QUEUED;
1160 }
1161 
1162 static inline int task_on_rq_migrating(struct task_struct *p)
1163 {
1164 	return p->on_rq == TASK_ON_RQ_MIGRATING;
1165 }
1166 
1167 #ifndef prepare_arch_switch
1168 # define prepare_arch_switch(next)	do { } while (0)
1169 #endif
1170 #ifndef finish_arch_post_lock_switch
1171 # define finish_arch_post_lock_switch()	do { } while (0)
1172 #endif
1173 
1174 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
1175 {
1176 #ifdef CONFIG_SMP
1177 	/*
1178 	 * We can optimise this out completely for !SMP, because the
1179 	 * SMP rebalancing from interrupt is the only thing that cares
1180 	 * here.
1181 	 */
1182 	next->on_cpu = 1;
1183 #endif
1184 }
1185 
1186 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
1187 {
1188 #ifdef CONFIG_SMP
1189 	/*
1190 	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
1191 	 * We must ensure this doesn't happen until the switch is completely
1192 	 * finished.
1193 	 *
1194 	 * In particular, the load of prev->state in finish_task_switch() must
1195 	 * happen before this.
1196 	 *
1197 	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
1198 	 */
1199 	smp_store_release(&prev->on_cpu, 0);
1200 #endif
1201 #ifdef CONFIG_DEBUG_SPINLOCK
1202 	/* this is a valid case when another task releases the spinlock */
1203 	rq->lock.owner = current;
1204 #endif
1205 	/*
1206 	 * If we are tracking spinlock dependencies then we have to
1207 	 * fix up the runqueue lock - which gets 'carried over' from
1208 	 * prev into current:
1209 	 */
1210 	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
1211 
1212 	raw_spin_unlock_irq(&rq->lock);
1213 }
1214 
1215 /*
1216  * wake flags
1217  */
1218 #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
1219 #define WF_FORK		0x02		/* child wakeup after fork */
1220 #define WF_MIGRATED	0x4		/* internal use, task got migrated */
1221 
1222 /*
1223  * To aid in avoiding the subversion of "niceness" due to uneven distribution
1224  * of tasks with abnormal "nice" values across CPUs the contribution that
1225  * each task makes to its run queue's load is weighted according to its
1226  * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
1227  * scaled version of the new time slice allocation that they receive on time
1228  * slice expiry etc.
1229  */
1230 
1231 #define WEIGHT_IDLEPRIO                3
1232 #define WMULT_IDLEPRIO         1431655765
1233 
1234 extern const int sched_prio_to_weight[40];
1235 extern const u32 sched_prio_to_wmult[40];
1236 
1237 /*
1238  * {de,en}queue flags:
1239  *
1240  * DEQUEUE_SLEEP  - task is no longer runnable
1241  * ENQUEUE_WAKEUP - task just became runnable
1242  *
1243  * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
1244  *                are in a known state which allows modification. Such pairs
1245  *                should preserve as much state as possible.
1246  *
1247  * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
1248  *        in the runqueue.
1249  *
1250  * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
1251  * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
1252  * ENQUEUE_MIGRATED  - the task was migrated during wakeup
1253  *
1254  */
1255 
1256 #define DEQUEUE_SLEEP		0x01
1257 #define DEQUEUE_SAVE		0x02 /* matches ENQUEUE_RESTORE */
1258 #define DEQUEUE_MOVE		0x04 /* matches ENQUEUE_MOVE */
1259 
1260 #define ENQUEUE_WAKEUP		0x01
1261 #define ENQUEUE_RESTORE		0x02
1262 #define ENQUEUE_MOVE		0x04
1263 
1264 #define ENQUEUE_HEAD		0x08
1265 #define ENQUEUE_REPLENISH	0x10
1266 #ifdef CONFIG_SMP
1267 #define ENQUEUE_MIGRATED	0x20
1268 #else
1269 #define ENQUEUE_MIGRATED	0x00
1270 #endif
1271 #define ENQUEUE_WAKEUP_NEW	0x40
1272 
1273 #define RETRY_TASK		((void *)-1UL)
1274 
1275 struct sched_class {
1276 	const struct sched_class *next;
1277 
1278 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1279 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1280 	void (*yield_task) (struct rq *rq);
1281 	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
1282 
1283 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1284 
1285 	/*
1286 	 * It is the responsibility of the pick_next_task() method that will
1287 	 * return the next task to call put_prev_task() on the @prev task or
1288 	 * something equivalent.
1289 	 *
1290 	 * May return RETRY_TASK when it finds a higher prio class has runnable
1291 	 * tasks.
1292 	 */
1293 	struct task_struct * (*pick_next_task) (struct rq *rq,
1294 						struct task_struct *prev,
1295 						struct pin_cookie cookie);
1296 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1297 
1298 #ifdef CONFIG_SMP
1299 	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
1300 	void (*migrate_task_rq)(struct task_struct *p);
1301 
1302 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1303 
1304 	void (*set_cpus_allowed)(struct task_struct *p,
1305 				 const struct cpumask *newmask);
1306 
1307 	void (*rq_online)(struct rq *rq);
1308 	void (*rq_offline)(struct rq *rq);
1309 #endif
1310 
1311 	void (*set_curr_task) (struct rq *rq);
1312 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1313 	void (*task_fork) (struct task_struct *p);
1314 	void (*task_dead) (struct task_struct *p);
1315 
1316 	/*
1317 	 * The switched_from() call is allowed to drop rq->lock, therefore we
1318 	 * cannot assume the switched_from/switched_to pair is serialized by
1319 	 * rq->lock. They are however serialized by p->pi_lock.
1320 	 */
1321 	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1322 	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1323 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1324 			     int oldprio);
1325 
1326 	unsigned int (*get_rr_interval) (struct rq *rq,
1327 					 struct task_struct *task);
1328 
1329 	void (*update_curr) (struct rq *rq);
1330 
1331 #define TASK_SET_GROUP  0
1332 #define TASK_MOVE_GROUP	1
1333 
1334 #ifdef CONFIG_FAIR_GROUP_SCHED
1335 	void (*task_change_group) (struct task_struct *p, int type);
1336 #endif
1337 };
1338 
1339 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
1340 {
1341 	prev->sched_class->put_prev_task(rq, prev);
1342 }
1343 
1344 static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
1345 {
1346 	curr->sched_class->set_curr_task(rq);
1347 }
1348 
1349 #define sched_class_highest (&stop_sched_class)
1350 #define for_each_class(class) \
1351    for (class = sched_class_highest; class; class = class->next)
1352 
1353 extern const struct sched_class stop_sched_class;
1354 extern const struct sched_class dl_sched_class;
1355 extern const struct sched_class rt_sched_class;
1356 extern const struct sched_class fair_sched_class;
1357 extern const struct sched_class idle_sched_class;
1358 
1359 
1360 #ifdef CONFIG_SMP
1361 
1362 extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc);
1363 extern void update_group_capacity(struct sched_domain *sd, int cpu);
1364 
1365 extern void trigger_load_balance(struct rq *rq);
1366 
1367 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
1368 
1369 #endif
1370 
1371 #ifdef CONFIG_CPU_IDLE
1372 static inline void idle_set_state(struct rq *rq,
1373 				  struct cpuidle_state *idle_state)
1374 {
1375 	rq->idle_state = idle_state;
1376 }
1377 
1378 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1379 {
1380 	SCHED_WARN_ON(!rcu_read_lock_held());
1381 	return rq->idle_state;
1382 }
1383 
1384 static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx)
1385 {
1386 	rq->idle_state_idx = idle_state_idx;
1387 }
1388 
1389 static inline int idle_get_state_idx(struct rq *rq)
1390 {
1391 	WARN_ON(!rcu_read_lock_held());
1392 	return rq->idle_state_idx;
1393 }
1394 #else
1395 static inline void idle_set_state(struct rq *rq,
1396 				  struct cpuidle_state *idle_state)
1397 {
1398 }
1399 
1400 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1401 {
1402 	return NULL;
1403 }
1404 
1405 static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx)
1406 {
1407 }
1408 
1409 static inline int idle_get_state_idx(struct rq *rq)
1410 {
1411 	return -1;
1412 }
1413 #endif
1414 
1415 extern void sysrq_sched_debug_show(void);
1416 extern void sched_init_granularity(void);
1417 extern void update_max_interval(void);
1418 
1419 extern void init_sched_dl_class(void);
1420 extern void init_sched_rt_class(void);
1421 extern void init_sched_fair_class(void);
1422 
1423 extern void resched_curr(struct rq *rq);
1424 extern void resched_cpu(int cpu);
1425 
1426 extern struct rt_bandwidth def_rt_bandwidth;
1427 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1428 
1429 extern struct dl_bandwidth def_dl_bandwidth;
1430 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
1431 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1432 
1433 unsigned long to_ratio(u64 period, u64 runtime);
1434 
1435 extern void init_entity_runnable_average(struct sched_entity *se);
1436 extern void post_init_entity_util_avg(struct sched_entity *se);
1437 
1438 #ifdef CONFIG_NO_HZ_FULL
1439 extern bool sched_can_stop_tick(struct rq *rq);
1440 
1441 /*
1442  * Tick may be needed by tasks in the runqueue depending on their policy and
1443  * requirements. If the tick is needed, send the target CPU an IPI to kick it out of
1444  * nohz mode if necessary.
1445  */
1446 static inline void sched_update_tick_dependency(struct rq *rq)
1447 {
1448 	int cpu;
1449 
1450 	if (!tick_nohz_full_enabled())
1451 		return;
1452 
1453 	cpu = cpu_of(rq);
1454 
1455 	if (!tick_nohz_full_cpu(cpu))
1456 		return;
1457 
1458 	if (sched_can_stop_tick(rq))
1459 		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
1460 	else
1461 		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
1462 }
1463 #else
1464 static inline void sched_update_tick_dependency(struct rq *rq) { }
1465 #endif
1466 
1467 static inline void __add_nr_running(struct rq *rq, unsigned count)
1468 {
1469 	unsigned prev_nr = rq->nr_running;
1470 
1471 	rq->nr_running = prev_nr + count;
1472 
1473 	if (prev_nr < 2 && rq->nr_running >= 2) {
1474 #ifdef CONFIG_SMP
1475 		if (!rq->rd->overload)
1476 			rq->rd->overload = true;
1477 #endif
1478 	}
1479 
1480 	sched_update_tick_dependency(rq);
1481 }
1482 
1483 static inline void __sub_nr_running(struct rq *rq, unsigned count)
1484 {
1485 	rq->nr_running -= count;
1486 	/* Check if we still need preemption */
1487 	sched_update_tick_dependency(rq);
1488 }
1489 
1490 #ifdef CONFIG_CPU_QUIET
1491 #define NR_AVE_SCALE(x)		((x) << FSHIFT)
1492 static inline u64 do_nr_running_integral(struct rq *rq)
1493 {
1494 	s64 nr, deltax;
1495 	u64 nr_running_integral = rq->nr_running_integral;
1496 
1497 	deltax = rq->clock_task - rq->nr_last_stamp;
1498 	nr = NR_AVE_SCALE(rq->nr_running);
1499 
1500 	nr_running_integral += nr * deltax;
1501 
1502 	return nr_running_integral;
1503 }
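
/*
 * Example: if nr_running stayed at 2 for 3ms of clock_task time, the
 * integral grows by (2 << FSHIFT) * 3000000; dividing the accumulated
 * integral by the elapsed time recovers a fixed-point, time-weighted
 * average of nr_running.
 */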
1504 
1505 static inline void add_nr_running(struct rq *rq, unsigned count)
1506 {
1507 	write_seqcount_begin(&rq->ave_seqcnt);
1508 	rq->nr_running_integral = do_nr_running_integral(rq);
1509 	rq->nr_last_stamp = rq->clock_task;
1510 	__add_nr_running(rq, count);
1511 	write_seqcount_end(&rq->ave_seqcnt);
1512 }
1513 
1514 static inline void sub_nr_running(struct rq *rq, unsigned count)
1515 {
1516 	write_seqcount_begin(&rq->ave_seqcnt);
1517 	rq->nr_running_integral = do_nr_running_integral(rq);
1518 	rq->nr_last_stamp = rq->clock_task;
1519 	__sub_nr_running(rq, count);
1520 	write_seqcount_end(&rq->ave_seqcnt);
1521 }
1522 #else
1523 #define add_nr_running __add_nr_running
1524 #define sub_nr_running __sub_nr_running
1525 #endif
1526 
1527 static inline void rq_last_tick_reset(struct rq *rq)
1528 {
1529 #ifdef CONFIG_NO_HZ_FULL
1530 	rq->last_sched_tick = jiffies;
1531 #endif
1532 }
1533 
1534 extern void update_rq_clock(struct rq *rq);
1535 
1536 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1537 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1538 
1539 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1540 
1541 extern const_debug unsigned int sysctl_sched_time_avg;
1542 extern const_debug unsigned int sysctl_sched_nr_migrate;
1543 extern const_debug unsigned int sysctl_sched_migration_cost;
1544 
1545 static inline u64 sched_avg_period(void)
1546 {
1547 	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1548 }
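
/*
 * With the default sysctl_sched_time_avg of 1000 (ms) this evaluates to
 * 500ms; sched_avg_update() halves rq->rt_avg once per such period.
 */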
1549 
1550 #ifdef CONFIG_SCHED_HRTICK
1551 
1552 /*
1553  * Use hrtick when:
1554  *  - enabled by features
1555  *  - hrtimer is actually high res
1556  */
1557 static inline int hrtick_enabled(struct rq *rq)
1558 {
1559 	if (!sched_feat(HRTICK))
1560 		return 0;
1561 	if (!cpu_active(cpu_of(rq)))
1562 		return 0;
1563 	return hrtimer_is_hres_active(&rq->hrtick_timer);
1564 }
1565 
1566 void hrtick_start(struct rq *rq, u64 delay);
1567 
1568 #else
1569 
1570 static inline int hrtick_enabled(struct rq *rq)
1571 {
1572 	return 0;
1573 }
1574 
1575 #endif /* CONFIG_SCHED_HRTICK */
1576 
1577 #ifdef CONFIG_SMP
1578 extern void sched_avg_update(struct rq *rq);
1579 
1580 #ifndef arch_scale_freq_capacity
1581 static __always_inline
1582 unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
1583 {
1584 	return SCHED_CAPACITY_SCALE;
1585 }
1586 #endif
1587 
1588 #ifndef arch_scale_max_freq_capacity
1589 static __always_inline
1590 unsigned long arch_scale_max_freq_capacity(struct sched_domain *sd, int cpu)
1591 {
1592 	return SCHED_CAPACITY_SCALE;
1593 }
1594 #endif
1595 
1596 #ifndef arch_scale_min_freq_capacity
1597 static __always_inline
1598 unsigned long arch_scale_min_freq_capacity(struct sched_domain *sd, int cpu)
1599 {
1600 	/*
1601 	 * Multiplied with any capacity value, this scale factor will return
1602 	 * 0, which represents an un-capped state
1603 	 */
1604 	return 0;
1605 }
1606 #endif
1607 
1608 #ifndef arch_scale_cpu_capacity
1609 static __always_inline
1610 unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
1611 {
1612 	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
1613 		return sd->smt_gain / sd->span_weight;
1614 
1615 	return SCHED_CAPACITY_SCALE;
1616 }
1617 #endif
1618 
1619 #ifdef CONFIG_SMP
1620 static inline unsigned long capacity_of(int cpu)
1621 {
1622 	return cpu_rq(cpu)->cpu_capacity;
1623 }
1624 
1625 static inline unsigned long capacity_orig_of(int cpu)
1626 {
1627 	return cpu_rq(cpu)->cpu_capacity_orig;
1628 }
1629 
1630 extern unsigned int sysctl_sched_use_walt_cpu_util;
1631 extern unsigned int walt_ravg_window;
1632 extern bool walt_disabled;
1633 
1634 /*
1635  * cpu_util returns the amount of capacity of a CPU that is used by CFS
1636  * tasks. The unit of the return value must be the one of capacity so we can
1637  * compare the utilization with the capacity of the CPU that is available for
1638  * CFS task (ie cpu_capacity).
1639  *
1640  * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
1641  * recent utilization of currently non-runnable tasks on a CPU. It represents
1642  * the amount of utilization of a CPU in the range [0..capacity_orig] where
1643  * capacity_orig is the cpu_capacity available at the highest frequency
1644  * (arch_scale_freq_capacity()).
1645  * The utilization of a CPU converges towards a sum equal to or less than the
1646  * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
1647  * the running time on this CPU scaled by capacity_curr.
1648  *
1649  * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
1650  * higher than capacity_orig because of unfortunate rounding in
1651  * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
1652  * the average stabilizes with the new running time. We need to check that the
1653  * utilization stays within the range of [0..capacity_orig] and cap it if
1654  * necessary. Without utilization capping, a group could be seen as overloaded
1655  * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
1656  * available capacity. We allow utilization to overshoot capacity_curr (but not
1657  * capacity_orig) as it is useful for predicting the capacity required after task
1658  * migrations (scheduler-driven DVFS).
1659  */
1660 static inline unsigned long __cpu_util(int cpu, int delta)
1661 {
1662 	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
1663 	unsigned long capacity = capacity_orig_of(cpu);
1664 
1665 #ifdef CONFIG_SCHED_WALT
1666 	if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
1667 		util = div64_u64(cpu_rq(cpu)->cumulative_runnable_avg,
1668 			       walt_ravg_window >> SCHED_CAPACITY_SHIFT);
1669 #endif
1670 	delta += util;
1671 	if (delta < 0)
1672 		return 0;
1673 
1674 	return (delta >= capacity) ? capacity : delta;
1675 }
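
/*
 * Example (PELT path, WALT override ignored): a CPU with
 * capacity_orig_of() == 1024 and cfs util_avg == 300 is roughly 30%
 * utilized; __cpu_util(cpu, 200) returns 500, and anything at or above
 * 1024 is clamped to the original capacity.
 */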
1676 
1677 static inline unsigned long cpu_util(int cpu)
1678 {
1679 	return __cpu_util(cpu, 0);
1680 }
1681 
1682 static inline unsigned long cpu_util_freq(int cpu)
1683 {
1684 	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
1685 	unsigned long capacity = capacity_orig_of(cpu);
1686 
1687 #ifdef CONFIG_SCHED_WALT
1688 	if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
1689 		util = div64_u64(cpu_rq(cpu)->prev_runnable_sum,
1690 				 walt_ravg_window >> SCHED_CAPACITY_SHIFT);
1691 #endif
1692 	return (util >= capacity) ? capacity : util;
1693 }
1694 
1695 #endif
1696 
1697 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1698 {
1699 	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
1700 }
1701 #else
1702 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
1703 static inline void sched_avg_update(struct rq *rq) { }
1704 #endif
1705 
1706 struct rq_flags {
1707 	unsigned long flags;
1708 	struct pin_cookie cookie;
1709 };
1710 
1711 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1712 	__acquires(rq->lock);
1713 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1714 	__acquires(p->pi_lock)
1715 	__acquires(rq->lock);
1716 
1717 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
1718 	__releases(rq->lock)
1719 {
1720 	lockdep_unpin_lock(&rq->lock, rf->cookie);
1721 	raw_spin_unlock(&rq->lock);
1722 }
1723 
1724 static inline void
1725 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1726 	__releases(rq->lock)
1727 	__releases(p->pi_lock)
1728 {
1729 	lockdep_unpin_lock(&rq->lock, rf->cookie);
1730 	raw_spin_unlock(&rq->lock);
1731 	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
1732 }

extern struct rq *lock_rq_of(struct task_struct *p, struct rq_flags *flags);
extern void unlock_rq_of(struct rq *rq, struct task_struct *p, struct rq_flags *flags);

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations.  This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below.  However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry.  This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	if (this_rq != busiest)
		raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
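
/*
 * Illustrative example: the typical shape of a pull-style balancing
 * path.  double_lock_balance() may drop this_rq->lock while taking
 * busiest->lock (it returns 1 in that case), so anything observed
 * before the call must be revalidated afterwards.  The helper is a
 * hypothetical sketch, not an existing balance function.
 */
static inline void example_pull_from(struct rq *this_rq, struct rq *busiest)
{
	/* Called with this_rq->lock held and interrupts disabled. */
	if (double_lock_balance(this_rq, busiest)) {
		/*
		 * this_rq->lock was released and re-acquired: re-check the
		 * condition that motivated the pull before acting on it.
		 */
	}

	/* ... detach tasks from busiest and attach them to this_rq ... */

	double_unlock_balance(this_rq, busiest);
}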

static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock;
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock;
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}
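
/*
 * Illustrative example: double_rq_lock() expects interrupts to already
 * be disabled (see the BUG_ON() above), so a caller not already running
 * with interrupts off brackets it as below.  The helper is hypothetical.
 */
static inline void example_with_both_rqs_locked(struct rq *rq1, struct rq *rq2)
{
	unsigned long flags;

	local_irq_save(flags);
	double_rq_lock(rq1, rq2);

	/* ... operate on both runqueues here ... */

	double_rq_unlock(rq1, rq2);
	local_irq_restore(flags);
}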

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock;
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock;
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);

#ifdef	CONFIG_SCHED_DEBUG
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);

#ifdef CONFIG_NUMA_BALANCING
extern void
show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
	unsigned long tpf, unsigned long gsf, unsigned long gpf);
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)

extern void nohz_balance_exit_idle(unsigned int cpu);
#else
static inline void nohz_balance_exit_idle(unsigned int cpu) { }
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
struct irqtime {
	u64			hardirq_time;
	u64			softirq_time;
	u64			irq_start_time;
	struct u64_stats_sync	sync;
};

DECLARE_PER_CPU(struct irqtime, cpu_irqtime);

static inline u64 irq_time_read(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		seq = __u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->softirq_time + irqtime->hardirq_time;
	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}
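
/*
 * Illustrative example: the matching writer side of the u64_stats
 * seqcount protecting struct irqtime.  The real updates live in the
 * cputime accounting code; this hypothetical helper only shows why
 * irq_time_read() must retry with __u64_stats_fetch_retry().
 */
static inline void example_irqtime_add(int cpu, u64 delta, bool hardirq)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);

	u64_stats_update_begin(&irqtime->sync);
	if (hardirq)
		irqtime->hardirq_time += delta;
	else
		irqtime->softirq_time += delta;
	u64_stats_update_end(&irqtime->sync);
}
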
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifdef CONFIG_CPU_FREQ
DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);

/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @rq: Runqueue to carry out the update for.
 * @flags: Update reason flags.
 *
 * This function is called by the scheduler on the CPU whose utilization is
 * being updated.
 *
 * It can only be called from RCU-sched read-side critical sections.
 *
 * The way cpufreq is currently arranged requires it to evaluate the CPU
 * performance state (frequency/voltage) on a regular basis to prevent it from
 * being stuck in a completely inadequate performance level for too long.
 * That is not guaranteed to happen if the updates are only triggered from CFS,
 * though, because no such updates may arrive at all while RT or deadline tasks
 * are active all the time (or when only RT and DL tasks are runnable).
 *
 * As a workaround for that issue, this function is called by the RT and DL
 * sched classes to trigger extra cpufreq updates and prevent cpufreq from
 * stalling, but that really is a band-aid.  Going forward it should be
 * replaced with solutions targeted more specifically at RT and DL tasks.
 */
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
	struct update_util_data *data;

	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
	if (data)
		data->func(data, rq_clock(rq), flags);
}

static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
{
	if (cpu_of(rq) == smp_processor_id())
		cpufreq_update_util(rq, flags);
}
#else
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
#endif /* CONFIG_CPU_FREQ */
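
/*
 * Illustrative example: how a scheduling class typically requests a
 * cpufreq re-evaluation from its accounting path, per the comment above.
 * The SCHED_CPUFREQ_RT flag and the helper name are assumptions for
 * illustration; the real call sites are in the rt/dl/fair class code.
 */
static inline void example_note_rt_activity(struct rq *rq)
{
	/* Tell the governor that RT activity is ongoing on this CPU. */
	cpufreq_update_util(rq, SCHED_CPUFREQ_RT);
}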

#ifdef CONFIG_SCHED_WALT

static inline bool
walt_task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
{
	return cpu_of(rq) == task_cpu(p) &&
	       (p->on_rq || p->last_sleep_ts >= rq->window_start);
}

#endif /* CONFIG_SCHED_WALT */

#ifdef arch_scale_freq_capacity
#ifndef arch_scale_freq_invariant
#define arch_scale_freq_invariant()	(true)
#endif
#else /* arch_scale_freq_capacity */
#define arch_scale_freq_invariant()	(false)
#endif