/*
 *  kernel/sched/core.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 *  1998-11-19	Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
 *		and per-CPU runqueues.  Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03	Interactivity tuning by Con Kolivas.
 *  2004-04-02	Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/binfmts.h>
#include <linux/context_tracking.h>
#include <linux/compiler.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
#include "walt.h"

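/*
 * start_bandwidth_timer - ensure a bandwidth period timer is running.
 *
 * Loop until @period_timer is seen active: forward it by @period from its
 * base clock's current time and re-arm it, preserving the soft/hard expiry
 * range that was programmed into the timer.
 */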
void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
	unsigned long delta;
	ktime_t soft, hard, now;

	for (;;) {
		if (hrtimer_active(period_timer))
			break;

		now = hrtimer_cb_get_time(period_timer);
		hrtimer_forward(period_timer, now, period);

		soft = hrtimer_get_softexpires(period_timer);
		hard = hrtimer_get_expires(period_timer);
		delta = ktime_to_ns(ktime_sub(hard, soft));
		__hrtimer_start_range_ns(period_timer, soft, delta,
					 HRTIMER_MODE_ABS_PINNED, 0);
	}
}

DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

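/*
 * update_rq_clock - advance rq->clock to the current sched_clock time.
 *
 * Does nothing when a clock update was explicitly skipped via
 * rq->skip_clock_update; otherwise the raw delta is also fed to
 * update_rq_clock_task() to maintain rq->clock_task.
 */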
void update_rq_clock(struct rq *rq)
{
	s64 delta;

	if (rq->skip_clock_update > 0)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

/*
 * Debugging: various feature bits
 */

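/*
 * Each SCHED_FEAT(name, enabled) entry in features.h is expanded several
 * times below: first to build the default feature bitmask, then (under
 * SCHED_DEBUG) to build the feature-name table and the jump-label keys.
 */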
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	if (static_key_enabled(&sched_feat_keys[i]))
		static_key_slow_dec(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	if (!static_key_enabled(&sched_feat_keys[i]))
		static_key_slow_inc(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */

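/*
 * sched_feat_set - toggle one feature from a "name" / "NO_name" string.
 *
 * Returns the index of the feature that matched, or __SCHED_FEAT_NR when
 * @cmp named no known feature (the write handler below maps that to
 * -EINVAL).
 */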
static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	mutex_lock(&inode->i_mutex);
	i = sched_feat_set(cmp);
	mutex_unlock(&inode->i_mutex);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);
#endif /* CONFIG_SCHED_DEBUG */

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
			return rq;
		raw_spin_unlock(&rq->lock);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, *flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

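/*
 * lock_rq_of()/unlock_rq_of() are thin, non-static wrappers around
 * task_rq_lock()/task_rq_unlock() for code outside this file (e.g. the
 * WALT code) that cannot use the static helpers directly.
 */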
struct rq *
lock_rq_of(struct task_struct *p, unsigned long *flags)
{
	return task_rq_lock(p, flags);
}

static void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}

void
unlock_rq_of(struct rq *rq, struct task_struct *p, unsigned long *flags)
{
	task_rq_unlock(rq, p, flags);
}

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static int __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = hrtimer_get_softexpires(timer);

	return __hrtimer_start_range_ns(timer, time, 0, HRTIMER_MODE_ABS_PINNED, 0);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;

	raw_spin_lock(&rq->lock);
	__hrtick_restart(rq);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	time = ktime_add_ns(timer->base->get_time(), delta);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		__hrtick_restart(rq);
	} else if (!rq->hrtick_csd_pending) {
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
		rq->hrtick_csd_pending = 1;
	}
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		hrtick_clear(cpu_rq(cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
	hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
			HRTIMER_MODE_REL_PINNED, 0);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, val)						\
({	typeof(*(ptr)) __old, __val = *(ptr);				\
	for (;;) {							\
		__old = cmpxchg((ptr), __val, __val | (val));		\
		if (__old == __val)					\
			break;						\
		__val = __old;						\
	}								\
	__old;								\
})
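/*
 * Like an atomic fetch-or: for example, fetch_or(&ti->flags,
 * _TIF_NEED_RESCHED) ORs the flag into *ptr and evaluates to the value the
 * word held beforehand, so callers such as set_nr_and_not_polling() below
 * can inspect the old bits.
 */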

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;
		val = old;
	}
	return true;
}

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&rq->lock);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	resched_curr(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle cpu would add more delay to the timers than intended
 * (as that cpu's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(int pinned)
{
	int cpu = smp_processor_id();
	int i;
	struct sched_domain *sd;

	if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu))
		return cpu;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu(i, sched_domain_span(sd)) {
			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}
unlock:
	rcu_read_unlock();
	return cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static inline bool got_nohz_idle_kick(void)
{
	int cpu = smp_processor_id();

	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
		return false;

	if (idle_cpu(cpu) && !need_resched())
		return true;

	/*
	 * We can't run Idle Load Balance on this CPU for this time so we
	 * cancel it and clear NOHZ_BALANCE_KICK
	 */
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
	return false;
}

#else /* CONFIG_NO_HZ_COMMON */

static inline bool got_nohz_idle_kick(void)
{
	return false;
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(void)
{
	/*
	 * More than one running task needs preemption.
	 * The nr_running update is assumed to be visible
	 * after the IPI is sent from the wakers.
	 */
	if (this_rq()->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */

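/*
 * sched_avg_update - age the runqueue's RT load average.
 *
 * For every sched_avg_period() that has elapsed since rq->age_stamp,
 * halve rq->rt_avg so that old RT activity decays out of the average.
 */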
void sched_avg_update(struct rq *rq)
{
	s64 period = sched_avg_period();

	while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (rq->age_stamp));
		rq->age_stamp += period;
		rq->rt_avg /= 2;
	}
}

#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
			tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

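/*
 * tg_nop - no-op tree visitor for walk_tg_tree_from(), for callers that
 * only care about one direction of the walk.
 */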
int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

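/*
 * set_load_weight - derive @p's load weight from its static (nice)
 * priority via the prio_to_weight[]/prio_to_wmult[] tables; SCHED_IDLE
 * tasks get a fixed minimal weight instead.
 */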
static void set_load_weight(struct task_struct *p)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (p->policy == SCHED_IDLE) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	load->weight = scale_load(prio_to_weight[prio]);
	load->inv_weight = prio_to_wmult[prio];
}

static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_queued(rq, p);
	p->sched_class->enqueue_task(rq, p, flags);
}

static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_dequeued(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight mis-attribution of {soft,}irq
	 * time; a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		sched_rt_avg_update(rq, irq_delta + steal);
#endif
}

void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, it's something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_dl_policy(p))
		prio = MAX_DL_PRIO-1;
	else if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/*
 * Can drop rq->lock, because sched_class::switched_from() methods may
 * drop it.
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);
		/* Possible rq->lock 'hole'. */
		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_curr(rq);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule. In
	 * this case, we can save a useless back-to-back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
		rq->skip_clock_update = 1;
}

#ifdef CONFIG_SMP
/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
{
	struct rq *rq = task_rq(p);

	lockdep_assert_held(&rq->lock);

	dequeue_task(rq, p, 0);
	p->on_rq = TASK_ON_RQ_MIGRATING;
	double_lock_balance(rq, cpu_rq(new_cpu));
	set_task_cpu(p, new_cpu);
	double_unlock_balance(rq, cpu_rq(new_cpu));
	raw_spin_unlock(&rq->lock);

	rq = cpu_rq(new_cpu);

	raw_spin_lock(&rq->lock);
	BUG_ON(task_cpu(p) != new_cpu);
	p->on_rq = TASK_ON_RQ_QUEUED;
	enqueue_task(rq, p, 0);
	check_preempt_curr(rq, p, 0);

	return rq;
}

struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

/*
 * Move (not current) task off this cpu, onto dest cpu. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 *
 * Returns non-zero if task was successfully migrated.
 */
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
	struct rq *rq;
	int ret = 0;

	if (unlikely(!cpu_active(dest_cpu)))
		return ret;

	rq = cpu_rq(src_cpu);

	raw_spin_lock(&p->pi_lock);
	raw_spin_lock(&rq->lock);
	/* Already moved. */
	if (task_cpu(p) != src_cpu)
		goto done;

	/* Affinity changed (again). */
	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
		goto fail;

	/*
	 * If we're not on a rq, the next wake-up will ensure we're
	 * placed properly.
	 */
	if (task_on_rq_queued(p))
		rq = move_queued_task(p, dest_cpu);
done:
	ret = 1;
fail:
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock(&p->pi_lock);
	return ret;
}

/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;

	/*
	 * The original target cpu might have gone down and we might
	 * be on another cpu but it doesn't matter.
	 */
	local_irq_disable();
	/*
	 * We need to explicitly wake pending tasks before running
	 * __migrate_task() such that we will not miss enforcing cpus_allowed
	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
	 */
	sched_ttwu_pending();
	__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
	local_irq_enable();
	return 0;
}

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	lockdep_assert_held(&p->pi_lock);

	if (p->sched_class->set_cpus_allowed)
		p->sched_class->set_cpus_allowed(p, new_mask);

	cpumask_copy(&p->cpus_allowed, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
static int __set_cpus_allowed_ptr(struct task_struct *p,
				  const struct cpumask *new_mask, bool check)
{
	unsigned long flags;
	struct rq *rq;
	unsigned int dest_cpu;
	int ret = 0;

	rq = task_rq_lock(p, &flags);

	/*
	 * Must re-check here, to close a race against __kthread_bind(),
	 * sched_setaffinity() is not guaranteed to observe the flag.
	 */
	if (check && (p->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out;
	}

	if (cpumask_equal(&p->cpus_allowed, new_mask))
		goto out;

	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
		ret = -EINVAL;
		goto out;
	}

	do_set_cpus_allowed(p, new_mask);

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;

	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
	if (task_running(rq, p) || p->state == TASK_WAKING) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from migration thread: drop lock and wait. */
		task_rq_unlock(rq, p, &flags);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
		tlb_migrate_finish(p->mm);
		return 0;
	} else if (task_on_rq_queued(p))
		rq = move_queued_task(p, dest_cpu);
out:
	task_rq_unlock(rq, p, &flags);

	return ret;
}

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	return __set_cpus_allowed_ptr(p, new_mask, false);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
			!(task_preempt_count(p) & PREEMPT_ACTIVE));

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p, new_cpu);
		p->se.nr_migrations++;
		perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);

		walt_fixup_busy_time(p, new_cpu);
	}

	__set_task_cpu(p, new_cpu);
}

static void __migrate_swap_task(struct task_struct *p, int cpu)
{
	if (task_on_rq_queued(p)) {
		struct rq *src_rq, *dst_rq;

		src_rq = task_rq(p);
		dst_rq = cpu_rq(cpu);

		deactivate_task(src_rq, p, 0);
		set_task_cpu(p, cpu);
		activate_task(dst_rq, p, 0);
		check_preempt_curr(dst_rq, p, 0);
	} else {
		/*
		 * Task isn't running anymore; make it appear like we migrated
		 * it before it went to sleep. This means on wakeup we make the
		 * previous cpu our target instead of where it really is.
		 */
		p->wake_cpu = cpu;
	}
}

struct migration_swap_arg {
	struct task_struct *src_task, *dst_task;
	int src_cpu, dst_cpu;
};

static int migrate_swap_stop(void *data)
{
	struct migration_swap_arg *arg = data;
	struct rq *src_rq, *dst_rq;
	int ret = -EAGAIN;

	src_rq = cpu_rq(arg->src_cpu);
	dst_rq = cpu_rq(arg->dst_cpu);

	double_raw_lock(&arg->src_task->pi_lock,
			&arg->dst_task->pi_lock);
	double_rq_lock(src_rq, dst_rq);
	if (task_cpu(arg->dst_task) != arg->dst_cpu)
		goto unlock;

	if (task_cpu(arg->src_task) != arg->src_cpu)
		goto unlock;

	if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
		goto unlock;

	if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
		goto unlock;

	__migrate_swap_task(arg->src_task, arg->dst_cpu);
	__migrate_swap_task(arg->dst_task, arg->src_cpu);

	ret = 0;

unlock:
	double_rq_unlock(src_rq, dst_rq);
	raw_spin_unlock(&arg->dst_task->pi_lock);
	raw_spin_unlock(&arg->src_task->pi_lock);

	return ret;
}

/*
 * Cross migrate two tasks
 */
int migrate_swap(struct task_struct *cur, struct task_struct *p)
{
	struct migration_swap_arg arg;
	int ret = -EINVAL;

	arg = (struct migration_swap_arg){
		.src_task = cur,
		.src_cpu = task_cpu(cur),
		.dst_task = p,
		.dst_cpu = task_cpu(p),
	};

	if (arg.src_cpu == arg.dst_cpu)
		goto out;

	/*
	 * These three tests are all lockless; this is OK since all of them
	 * will be re-checked with proper locks held further down the line.
	 */
	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
		goto out;

	if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
		goto out;

	if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
		goto out;

	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);

out:
	return ret;
}

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	unsigned long flags;
	int running, queued;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &flags);
		trace_sched_wait_task(p);
		running = task_running(rq, p);
		queued = task_on_rq_queued(p);
		ncsw = 0;
		if (!match_state || p->state == match_state)
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		task_rq_unlock(rq, p, &flags);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(queued)) {
			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);

/*
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
 */
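/*
 * select_fallback_rq - pick a new CPU when task_cpu(p) is not usable:
 * prefer an allowed, active CPU on the same node, then any allowed,
 * active CPU, widening the affinity mask via the cpuset fallback and
 * finally cpu_possible_mask before giving up.
 */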
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	/*
	 * If the node that the cpu is on has been offlined, cpu_to_node()
	 * will return -1. There is no cpu on the node, and we should
	 * select the cpu on the other node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			goto out;
		}

		switch (state) {
		case cpuset:
			/* No more Mr. Nice Guy. */
			cpuset_cpus_allowed_fallback(p);
			state = possible;
			break;

		case possible:
			do_set_cpus_allowed(p, cpu_possible_mask);
			state = fail;
			break;

		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
	}

	return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
{
	cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
	 * cpu.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
		     !cpu_online(cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}

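/*
 * update_avg - exponentially weighted moving average with a 1/8 weight:
 * avg += (sample - avg) / 8; used e.g. for rq->avg_idle below.
 */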
static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff >> 3;
}

#else

static inline int __set_cpus_allowed_ptr(struct task_struct *p,
					 const struct cpumask *new_mask, bool check)
{
	return set_cpus_allowed_ptr(p, new_mask);
}

#endif /* CONFIG_SMP */

static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
#ifdef CONFIG_SCHEDSTATS
	struct rq *rq = this_rq();

#ifdef CONFIG_SMP
	int this_cpu = smp_processor_id();

	if (cpu == this_cpu) {
		schedstat_inc(rq, ttwu_local);
		schedstat_inc(p, se.statistics.nr_wakeups_local);
	} else {
		struct sched_domain *sd;

		schedstat_inc(p, se.statistics.nr_wakeups_remote);
		rcu_read_lock();
		for_each_domain(this_cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				schedstat_inc(sd, ttwu_wake_remote);
				break;
			}
		}
		rcu_read_unlock();
	}

	if (wake_flags & WF_MIGRATED)
		schedstat_inc(p, se.statistics.nr_wakeups_migrate);

#endif /* CONFIG_SMP */

	schedstat_inc(rq, ttwu_count);
	schedstat_inc(p, se.statistics.nr_wakeups);

	if (wake_flags & WF_SYNC)
		schedstat_inc(p, se.statistics.nr_wakeups_sync);

#endif /* CONFIG_SCHEDSTATS */
}

static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
	activate_task(rq, p, en_flags);
	p->on_rq = TASK_ON_RQ_QUEUED;

	/* if a worker is waking up, notify workqueue */
	if (p->flags & PF_WQ_WORKER)
		wq_worker_waking_up(p, cpu_of(rq));
}

/*
 * Mark the task runnable and perform wakeup-preemption.
 */
static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	check_preempt_curr(rq, p, wake_flags);
	trace_sched_wakeup(p, true);

	p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken)
		p->sched_class->task_woken(rq, p);

	if (rq->idle_stamp) {
		u64 delta = rq_clock(rq) - rq->idle_stamp;
		u64 max = 2*rq->max_idle_balance_cost;

		update_avg(&rq->avg_idle, delta);

		if (rq->avg_idle > max)
			rq->avg_idle = max;

		rq->idle_stamp = 0;
	}
#endif
}

static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
#ifdef CONFIG_SMP
	if (p->sched_contributes_to_load)
		rq->nr_uninterruptible--;
#endif

	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
	ttwu_do_wakeup(rq, p, wake_flags);
}

/*
 * Called in case the task @p isn't fully descheduled from its runqueue;
 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
 * since all we need to do is flip p->state to TASK_RUNNING: the task is
 * still ->on_rq.
 */
static int ttwu_remote(struct task_struct *p, int wake_flags)
{
	struct rq *rq;
	int ret = 0;

	rq = __task_rq_lock(p);
	if (task_on_rq_queued(p)) {
		/* check_preempt_curr() may use rq clock */
		update_rq_clock(rq);
		ttwu_do_wakeup(rq, p, wake_flags);
		ret = 1;
	}
	__task_rq_unlock(rq);

	return ret;
}

#ifdef CONFIG_SMP
void sched_ttwu_pending(void)
{
	struct rq *rq = this_rq();
	struct llist_node *llist = llist_del_all(&rq->wake_list);
	struct task_struct *p;
	unsigned long flags;

	if (!llist)
		return;

	raw_spin_lock_irqsave(&rq->lock, flags);

	while (llist) {
		p = llist_entry(llist, struct task_struct, wake_entry);
		llist = llist_next(llist);
		ttwu_do_activate(rq, p, 0);
	}

	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

void scheduler_ipi(void)
{
	/*
	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
	 * TIF_NEED_RESCHED remotely (for the first time) will also send
	 * this IPI.
	 */
	preempt_fold_need_resched();

	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
		return;

	/*
	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
	 * traditionally all their work was done from the interrupt return
	 * path. Now that we actually do some work, we need to make sure
	 * we do call them.
	 *
	 * Some archs already do call them, luckily irq_enter/exit nest
	 * properly.
	 *
	 * Arguably we should visit all archs and update all handlers,
	 * however a fair share of IPIs are still resched only so this would
	 * somewhat pessimize the simple resched case.
	 */
	irq_enter();
	sched_ttwu_pending();

	/*
	 * Check if someone kicked us for doing the nohz idle load balance.
	 */
	if (unlikely(got_nohz_idle_kick())) {
		this_rq()->idle_balance = 1;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
	irq_exit();
}

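/*
 * ttwu_queue_remote - queue @p on @cpu's wake_list for a remote wakeup.
 *
 * Only the llist_add() that makes the list non-empty sends the reschedule
 * IPI, and even that is skipped when the remote idle task is polling and
 * will notice the new TIF_NEED_RESCHED on its own.
 */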
static void ttwu_queue_remote(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
		if (!set_nr_if_polling(rq->idle))
			smp_send_reschedule(cpu);
		else
			trace_sched_wake_idle_without_ipi(cpu);
	}
}

void wake_up_if_idle(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	rcu_read_lock();

	if (!is_idle_task(rcu_dereference(rq->curr)))
		goto out;

	if (set_nr_if_polling(rq->idle)) {
		trace_sched_wake_idle_without_ipi(cpu);
	} else {
		raw_spin_lock_irqsave(&rq->lock, flags);
		if (is_idle_task(rq->curr))
			smp_send_reschedule(cpu);
		/* Else cpu is not in idle, do nothing here */
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

out:
	rcu_read_unlock();
}

bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
#endif /* CONFIG_SMP */

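/*
 * ttwu_queue - enqueue the wakee: take the remote wake_list path when the
 * waking CPU does not share a cache with @cpu (and TTWU_QUEUE is enabled),
 * otherwise activate @p on @cpu's runqueue directly.
 */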
static void ttwu_queue(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#if defined(CONFIG_SMP)
	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
		sched_clock_cpu(cpu); /* sync clocks x-cpu */
		ttwu_queue_remote(p, cpu);
		return;
	}
#endif

	raw_spin_lock(&rq->lock);
	ttwu_do_activate(rq, p, 0);
	raw_spin_unlock(&rq->lock);
}

/**
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * Return: %true if @p was woken up, %false if it was already running
 * or @state didn't match @p's state.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
	unsigned long flags;
	int cpu, success = 0;
#ifdef CONFIG_SMP
	struct rq *rq;
	u64 wallclock;
#endif

	/*
	 * If we are going to wake up a thread waiting for CONDITION we
	 * need to ensure that CONDITION=1 done by the caller cannot be
	 * reordered with the p->state check below. This pairs with mb() in
	 * set_current_state() the waiting thread does.
	 */
	smp_mb__before_spinlock();
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	if (!(p->state & state))
		goto out;

	success = 1; /* we're going to change ->state */
	cpu = task_cpu(p);

	/*
	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
	 * in smp_cond_load_acquire() below.
	 *
	 * sched_ttwu_pending()			try_to_wake_up()
	 *   [S] p->on_rq = 1;			[L] p->state
	 *       UNLOCK rq->lock  -----.
	 *                              \
	 *				 +--- RMB
	 * schedule()                   /
	 *       LOCK rq->lock    -----'
	 *       UNLOCK rq->lock
	 *
	 * [task p]
	 *   [S] p->state = UNINTERRUPTIBLE	[L] p->on_rq
	 *
	 * Pairs with the UNLOCK+LOCK on rq->lock from the
	 * last wakeup of our task and the schedule that got our task
	 * current.
	 */
	smp_rmb();
	if (p->on_rq && ttwu_remote(p, wake_flags))
		goto stat;

1948 #ifdef CONFIG_SMP
1949 /*
1950 * If the owning (remote) cpu is still in the middle of schedule() with
1951 * this task as prev, wait until it's done referencing the task.
1952 */
1953 while (p->on_cpu)
1954 cpu_relax();
1955 /*
1956 * Pairs with the smp_wmb() in finish_lock_switch().
1957 */
1958 smp_rmb();
1959
1960 rq = cpu_rq(task_cpu(p));
1961
1962 raw_spin_lock(&rq->lock);
1963 wallclock = walt_ktime_clock();
1964 walt_update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
1965 walt_update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
1966 raw_spin_unlock(&rq->lock);
1967
1968 p->sched_contributes_to_load = !!task_contributes_to_load(p);
1969 p->state = TASK_WAKING;
1970
1971 if (p->sched_class->task_waking)
1972 p->sched_class->task_waking(p);
1973
1974 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
1975
1976 if (task_cpu(p) != cpu) {
1977 wake_flags |= WF_MIGRATED;
1978 set_task_cpu(p, cpu);
1979 }
1980
1981 #endif /* CONFIG_SMP */
1982
1983 ttwu_queue(p, cpu);
1984 stat:
1985 ttwu_stat(p, cpu, wake_flags);
1986 out:
1987 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1988
1989 return success;
1990 }
1991
1992 /**
1993 * try_to_wake_up_local - try to wake up a local task with rq lock held
1994 * @p: the thread to be awakened
1995 *
1996 * Put @p on the run-queue if it's not already there. The caller must
1997 * ensure that this_rq() is locked, @p is bound to this_rq() and not
1998 * the current task.
1999 */
2000 static void try_to_wake_up_local(struct task_struct *p)
2001 {
2002 struct rq *rq = task_rq(p);
2003
2004 if (WARN_ON_ONCE(rq != this_rq()) ||
2005 WARN_ON_ONCE(p == current))
2006 return;
2007
2008 lockdep_assert_held(&rq->lock);
2009
2010 if (!raw_spin_trylock(&p->pi_lock)) {
2011 raw_spin_unlock(&rq->lock);
2012 raw_spin_lock(&p->pi_lock);
2013 raw_spin_lock(&rq->lock);
2014 }
2015
2016 if (!(p->state & TASK_NORMAL))
2017 goto out;
2018
2019 if (!task_on_rq_queued(p)) {
2020 u64 wallclock = walt_ktime_clock();
2021
2022 walt_update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
2023 walt_update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
2024 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2025 }
2026
2027 ttwu_do_wakeup(rq, p, 0);
2028 ttwu_stat(p, smp_processor_id(), 0);
2029 out:
2030 raw_spin_unlock(&p->pi_lock);
2031 }
2032
2033 /**
2034 * wake_up_process - Wake up a specific process
2035 * @p: The process to be woken up.
2036 *
2037 * Attempt to wake up the nominated process and move it to the set of runnable
2038 * processes.
2039 *
2040 * Return: 1 if the process was woken up, 0 if it was already running.
2041 *
2042 * It may be assumed that this function implies a write memory barrier before
2043 * changing the task state if and only if any tasks are woken up.
2044 */
2045 int wake_up_process(struct task_struct *p)
2046 {
2047 WARN_ON(task_is_stopped_or_traced(p));
2048 return try_to_wake_up(p, TASK_NORMAL, 0);
2049 }
2050 EXPORT_SYMBOL(wake_up_process);
2051
2052 int wake_up_state(struct task_struct *p, unsigned int state)
2053 {
2054 return try_to_wake_up(p, state, 0);
2055 }
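/*
 * wake_up_process() passes TASK_NORMAL, i.e. TASK_INTERRUPTIBLE |
 * TASK_UNINTERRUPTIBLE, so it wakes any normal sleeper. A caller that
 * must not disturb uninterruptible sleeps can narrow the mask, e.g.:
 *
 *   wake_up_state(p, TASK_INTERRUPTIBLE);
 */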
2056
2057 /*
2058 * This function clears the sched_dl_entity static params.
2059 */
2060 void __dl_clear_params(struct task_struct *p)
2061 {
2062 struct sched_dl_entity *dl_se = &p->dl;
2063
2064 dl_se->dl_runtime = 0;
2065 dl_se->dl_deadline = 0;
2066 dl_se->dl_period = 0;
2067 dl_se->flags = 0;
2068 dl_se->dl_bw = 0;
2069
2070 dl_se->dl_throttled = 0;
2071 dl_se->dl_new = 1;
2072 dl_se->dl_yielded = 0;
2073 }
2074
2075 /*
2076 * Perform scheduler related setup for a newly forked process p.
2077 * p is forked by current.
2078 *
2079 * __sched_fork() is basic setup used by init_idle() too:
2080 */
2081 static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
2082 {
2083 p->on_rq = 0;
2084
2085 p->se.on_rq = 0;
2086 p->se.exec_start = 0;
2087 p->se.sum_exec_runtime = 0;
2088 p->se.prev_sum_exec_runtime = 0;
2089 p->se.nr_migrations = 0;
2090 p->se.vruntime = 0;
2091 INIT_LIST_HEAD(&p->se.group_node);
2092 walt_init_new_task_load(p);
2093
2094 #ifdef CONFIG_FAIR_GROUP_SCHED
2095 p->se.cfs_rq = NULL;
2096 #endif
2097
2098 #ifdef CONFIG_SCHEDSTATS
2099 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
2100 #endif
2101
2102 RB_CLEAR_NODE(&p->dl.rb_node);
2103 init_dl_task_timer(&p->dl);
2104 __dl_clear_params(p);
2105
2106 INIT_LIST_HEAD(&p->rt.run_list);
2107
2108 #ifdef CONFIG_PREEMPT_NOTIFIERS
2109 INIT_HLIST_HEAD(&p->preempt_notifiers);
2110 #endif
2111
2112 #ifdef CONFIG_NUMA_BALANCING
2113 if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
2114 p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2115 p->mm->numa_scan_seq = 0;
2116 }
2117
2118 if (clone_flags & CLONE_VM)
2119 p->numa_preferred_nid = current->numa_preferred_nid;
2120 else
2121 p->numa_preferred_nid = -1;
2122
2123 p->node_stamp = 0ULL;
2124 p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
2125 p->numa_scan_period = sysctl_numa_balancing_scan_delay;
2126 p->numa_work.next = &p->numa_work;
2127 p->numa_faults_memory = NULL;
2128 p->numa_faults_buffer_memory = NULL;
2129 p->last_task_numa_placement = 0;
2130 p->last_sum_exec_runtime = 0;
2131
2132 INIT_LIST_HEAD(&p->numa_entry);
2133 p->numa_group = NULL;
2134 #endif /* CONFIG_NUMA_BALANCING */
2135 }
2136
2137 #ifdef CONFIG_NUMA_BALANCING
2138 #ifdef CONFIG_SCHED_DEBUG
2139 void set_numabalancing_state(bool enabled)
2140 {
2141 if (enabled)
2142 sched_feat_set("NUMA");
2143 else
2144 sched_feat_set("NO_NUMA");
2145 }
2146 #else
2147 __read_mostly bool numabalancing_enabled;
2148
2149 void set_numabalancing_state(bool enabled)
2150 {
2151 numabalancing_enabled = enabled;
2152 }
2153 #endif /* CONFIG_SCHED_DEBUG */
2154
2155 #ifdef CONFIG_PROC_SYSCTL
2156 int sysctl_numa_balancing(struct ctl_table *table, int write,
2157 void __user *buffer, size_t *lenp, loff_t *ppos)
2158 {
2159 struct ctl_table t;
2160 int err;
2161 int state = numabalancing_enabled;
2162
2163 if (write && !capable(CAP_SYS_ADMIN))
2164 return -EPERM;
2165
2166 t = *table;
2167 t.data = &state;
2168 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2169 if (err < 0)
2170 return err;
2171 if (write)
2172 set_numabalancing_state(state);
2173 return err;
2174 }
2175 #endif
2176 #endif
2177
2178 /*
2179 * fork()/clone()-time setup:
2180 */
2181 int sched_fork(unsigned long clone_flags, struct task_struct *p)
2182 {
2183 unsigned long flags;
2184 int cpu = get_cpu();
2185
2186 __sched_fork(clone_flags, p);
2187 /*
2188 * We mark the process as running here. This guarantees that
2189 * nobody will actually run it, and a signal or other external
2190 * event cannot wake it up and insert it on the runqueue either.
2191 */
2192 p->state = TASK_RUNNING;
2193
2194 /*
2195 * Make sure we do not leak PI boosting priority to the child.
2196 */
2197 p->prio = current->normal_prio;
2198
2199 /*
2200 * Revert to default priority/policy on fork if requested.
2201 */
2202 if (unlikely(p->sched_reset_on_fork)) {
2203 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
2204 p->policy = SCHED_NORMAL;
2205 p->static_prio = NICE_TO_PRIO(0);
2206 p->rt_priority = 0;
2207 } else if (PRIO_TO_NICE(p->static_prio) < 0)
2208 p->static_prio = NICE_TO_PRIO(0);
2209
2210 p->prio = p->normal_prio = __normal_prio(p);
2211 set_load_weight(p);
2212
2213 /*
2214 * We don't need the reset flag anymore after the fork. It has
2215 * fulfilled its duty:
2216 */
2217 p->sched_reset_on_fork = 0;
2218 }
2219
2220 if (dl_prio(p->prio)) {
2221 put_cpu();
2222 return -EAGAIN;
2223 } else if (rt_prio(p->prio)) {
2224 p->sched_class = &rt_sched_class;
2225 } else {
2226 p->sched_class = &fair_sched_class;
2227 }
2228
2229 if (p->sched_class->task_fork)
2230 p->sched_class->task_fork(p);
2231
2232 /*
2233 * The child is not yet in the pid-hash so no cgroup attach races,
2234 * and the cgroup is pinned to this child because cgroup_fork()
2235 * is run before sched_fork().
2236 *
2237 * Silence PROVE_RCU.
2238 */
2239 raw_spin_lock_irqsave(&p->pi_lock, flags);
2240 set_task_cpu(p, cpu);
2241 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2242
2243 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
2244 if (likely(sched_info_on()))
2245 memset(&p->sched_info, 0, sizeof(p->sched_info));
2246 #endif
2247 #if defined(CONFIG_SMP)
2248 p->on_cpu = 0;
2249 #endif
2250 init_task_preempt_count(p);
2251 #ifdef CONFIG_SMP
2252 plist_node_init(&p->pushable_tasks, MAX_PRIO);
2253 RB_CLEAR_NODE(&p->pushable_dl_tasks);
2254 #endif
2255
2256 put_cpu();
2257 return 0;
2258 }
2259
2260 unsigned long to_ratio(u64 period, u64 runtime)
2261 {
2262 if (runtime == RUNTIME_INF)
2263 return 1ULL << 20;
2264
2265 /*
2266 * Doing this here saves a lot of checks in all
2267 * the calling paths, and returning zero seems
2268 * safe for them anyway.
2269 */
2270 if (period == 0)
2271 return 0;
2272
2273 return div64_u64(runtime << 20, period);
2274 }
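/*
 * Worked example: the ratio comes back as a 20-bit fixed point
 * fraction, so a 25us runtime every 100us period gives
 *
 *   to_ratio(100000, 25000) == (25000 << 20) / 100000
 *                           == 262144 == (1 << 20) / 4,
 *
 * i.e. 25% of one CPU.
 */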
2275
2276 #ifdef CONFIG_SMP
2277 inline struct dl_bw *dl_bw_of(int i)
2278 {
2279 rcu_lockdep_assert(rcu_read_lock_sched_held(),
2280 "sched RCU must be held");
2281 return &cpu_rq(i)->rd->dl_bw;
2282 }
2283
2284 static inline int dl_bw_cpus(int i)
2285 {
2286 struct root_domain *rd = cpu_rq(i)->rd;
2287 int cpus = 0;
2288
2289 rcu_lockdep_assert(rcu_read_lock_sched_held(),
2290 "sched RCU must be held");
2291 for_each_cpu_and(i, rd->span, cpu_active_mask)
2292 cpus++;
2293
2294 return cpus;
2295 }
2296 #else
2297 inline struct dl_bw *dl_bw_of(int i)
2298 {
2299 return &cpu_rq(i)->dl.dl_bw;
2300 }
2301
2302 static inline int dl_bw_cpus(int i)
2303 {
2304 return 1;
2305 }
2306 #endif
2307
2308 static inline
2309 void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
2310 {
2311 dl_b->total_bw -= tsk_bw;
2312 }
2313
2314 static inline
2315 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
2316 {
2317 dl_b->total_bw += tsk_bw;
2318 }
2319
2320 static inline
2321 bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
2322 {
2323 return dl_b->bw != -1 &&
2324 dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
2325 }
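/*
 * Reading the inequality above: a change overflows when
 *
 *   total_bw - old_bw + new_bw > bw * cpus,
 *
 * i.e. when, after releasing the old reservation, the new one no
 * longer fits in the per-cpu cap times the number of cpus. With the
 * default 95% cap (bw roughly 0.95 << 20) on a 2-cpu root domain,
 * reservations may sum up to about 1.9 cpus worth of runtime; a bw
 * of -1 means "no cap, accept everything".
 */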
2326
2327 /*
2328 * We must be sure that accepting a new task (or allowing changing the
2329 * parameters of an existing one) is consistent with the bandwidth
2330 * constraints. If so, this function also updates the currently
2331 * allocated bandwidth to reflect the new situation.
2332 *
2333 * This function is called while holding p's rq->lock.
2334 *
2335 * XXX we should delay bw change until the task's 0-lag point, see
2336 * __setparam_dl().
2337 */
2338 static int dl_overflow(struct task_struct *p, int policy,
2339 const struct sched_attr *attr)
2340 {
2341
2342 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2343 u64 period = attr->sched_period ?: attr->sched_deadline;
2344 u64 runtime = attr->sched_runtime;
2345 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2346 int cpus, err = -1;
2347
2348 if (new_bw == p->dl.dl_bw)
2349 return 0;
2350
2351 /*
2352 * Whether a task enters, leaves, or stays -deadline but changes
2353 * its parameters, we may need to update the total allocated
2354 * bandwidth of its container accordingly.
2355 */
2356 raw_spin_lock(&dl_b->lock);
2357 cpus = dl_bw_cpus(task_cpu(p));
2358 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2359 !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2360 __dl_add(dl_b, new_bw);
2361 err = 0;
2362 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2363 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2364 __dl_clear(dl_b, p->dl.dl_bw);
2365 __dl_add(dl_b, new_bw);
2366 err = 0;
2367 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2368 __dl_clear(dl_b, p->dl.dl_bw);
2369 err = 0;
2370 }
2371 raw_spin_unlock(&dl_b->lock);
2372
2373 return err;
2374 }
2375
2376 extern void init_dl_bw(struct dl_bw *dl_b);
2377
2378 /*
2379 * wake_up_new_task - wake up a newly created task for the first time.
2380 *
2381 * This function does some initial scheduler statistics housekeeping
2382 * that must be done for every newly created context, then puts the task
2383 * on the runqueue and wakes it.
2384 */
2385 void wake_up_new_task(struct task_struct *p)
2386 {
2387 unsigned long flags;
2388 struct rq *rq;
2389
2390 raw_spin_lock_irqsave(&p->pi_lock, flags);
2391
2392 walt_init_new_task_load(p);
2393
2394 /* Initialize new task's runnable average */
2395 init_entity_runnable_average(&p->se);
2396 #ifdef CONFIG_SMP
2397 /*
2398 * Fork balancing, do it here and not earlier because:
2399 * - cpus_allowed can change in the fork path
2400 * - any previously selected cpu might disappear through hotplug
2401 */
2402 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
2403 #endif
2404
2405 rq = __task_rq_lock(p);
2406 walt_mark_task_starting(p);
2407 activate_task(rq, p, ENQUEUE_WAKEUP_NEW);
2408 p->on_rq = TASK_ON_RQ_QUEUED;
2409 trace_sched_wakeup_new(p, true);
2410 check_preempt_curr(rq, p, WF_FORK);
2411 #ifdef CONFIG_SMP
2412 if (p->sched_class->task_woken)
2413 p->sched_class->task_woken(rq, p);
2414 #endif
2415 task_rq_unlock(rq, p, &flags);
2416 }
2417
2418 #ifdef CONFIG_PREEMPT_NOTIFIERS
2419
2420 /**
2421 * preempt_notifier_register - tell me when current is being preempted & rescheduled
2422 * @notifier: notifier struct to register
2423 */
2424 void preempt_notifier_register(struct preempt_notifier *notifier)
2425 {
2426 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2427 }
2428 EXPORT_SYMBOL_GPL(preempt_notifier_register);
2429
2430 /**
2431 * preempt_notifier_unregister - no longer interested in preemption notifications
2432 * @notifier: notifier struct to unregister
2433 *
2434 * This is safe to call from within a preemption notifier.
2435 */
2436 void preempt_notifier_unregister(struct preempt_notifier *notifier)
2437 {
2438 hlist_del(&notifier->link);
2439 }
2440 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
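/*
 * Minimal usage sketch (my_ops, my_sched_in and my_sched_out are
 * hypothetical). This is the pattern a hypervisor uses to learn when
 * its vcpu thread is switched out and back in:
 *
 *   static struct preempt_ops my_ops = {
 *           .sched_in  = my_sched_in,    <- current is being rescheduled
 *           .sched_out = my_sched_out,   <- current is being preempted
 *   };
 *
 *   preempt_notifier_init(&notifier, &my_ops);
 *   preempt_notifier_register(&notifier);
 *   ...
 *   preempt_notifier_unregister(&notifier);
 */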
2441
2442 static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2443 {
2444 struct preempt_notifier *notifier;
2445
2446 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
2447 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2448 }
2449
2450 static void
2451 fire_sched_out_preempt_notifiers(struct task_struct *curr,
2452 struct task_struct *next)
2453 {
2454 struct preempt_notifier *notifier;
2455
2456 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
2457 notifier->ops->sched_out(notifier, next);
2458 }
2459
2460 #else /* !CONFIG_PREEMPT_NOTIFIERS */
2461
2462 static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2463 {
2464 }
2465
2466 static void
2467 fire_sched_out_preempt_notifiers(struct task_struct *curr,
2468 struct task_struct *next)
2469 {
2470 }
2471
2472 #endif /* CONFIG_PREEMPT_NOTIFIERS */
2473
2474 /**
2475 * prepare_task_switch - prepare to switch tasks
2476 * @rq: the runqueue preparing to switch
2477 * @prev: the current task that is being switched out
2478 * @next: the task we are going to switch to.
2479 *
2480 * This is called with the rq lock held and interrupts off. It must
2481 * be paired with a subsequent finish_task_switch after the context
2482 * switch.
2483 *
2484 * prepare_task_switch sets up locking and calls architecture specific
2485 * hooks.
2486 */
2487 static inline void
2488 prepare_task_switch(struct rq *rq, struct task_struct *prev,
2489 struct task_struct *next)
2490 {
2491 trace_sched_switch(prev, next);
2492 sched_info_switch(rq, prev, next);
2493 perf_event_task_sched_out(prev, next);
2494 fire_sched_out_preempt_notifiers(prev, next);
2495 prepare_lock_switch(rq, next);
2496 prepare_arch_switch(next);
2497 }
2498
2499 /**
2500 * finish_task_switch - clean up after a task-switch
2501 * @rq: runqueue associated with task-switch
2502 * @prev: the thread we just switched away from.
2503 *
2504 * finish_task_switch must be called after the context switch, paired
2505 * with a prepare_task_switch call before the context switch.
2506 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2507 * and do any other architecture-specific cleanup actions.
2508 *
2509 * Note that we may have delayed dropping an mm in context_switch(). If
2510 * so, we finish that here outside of the runqueue lock. (Doing it
2511 * with the lock held can cause deadlocks; see schedule() for
2512 * details.)
2513 */
2514 static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2515 __releases(rq->lock)
2516 {
2517 struct mm_struct *mm = rq->prev_mm;
2518 long prev_state;
2519
2520 rq->prev_mm = NULL;
2521
2522 /*
2523 * A task struct has one reference for its use as "current".
2524 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
2525 * schedule one last time. The schedule call will never return, and
2526 * the scheduled task must drop that reference.
2527 *
2528 * We must observe prev->state before clearing prev->on_cpu (in
2529 * finish_lock_switch), otherwise a concurrent wakeup can get prev
2530 * running on another CPU and we could race with its RUNNING -> DEAD
2531 * transition, resulting in a double drop.
2532 */
2533 prev_state = prev->state;
2534 vtime_task_switch(prev);
2535 finish_arch_switch(prev);
2536 perf_event_task_sched_in(prev, current);
2537 finish_lock_switch(rq, prev);
2538 finish_arch_post_lock_switch();
2539
2540 fire_sched_in_preempt_notifiers(current);
2541 if (mm)
2542 mmdrop(mm);
2543 if (unlikely(prev_state == TASK_DEAD)) {
2544 if (prev->sched_class->task_dead)
2545 prev->sched_class->task_dead(prev);
2546
2547 /*
2548 * Remove function-return probe instances associated with this
2549 * task and put them back on the free list.
2550 */
2551 kprobe_flush_task(prev);
2552 put_task_struct(prev);
2553 }
2554
2555 tick_nohz_task_switch(current);
2556 }
2557
2558 #ifdef CONFIG_SMP
2559
2560 /* rq->lock is NOT held, but preemption is disabled */
2561 static inline void post_schedule(struct rq *rq)
2562 {
2563 if (rq->post_schedule) {
2564 unsigned long flags;
2565
2566 raw_spin_lock_irqsave(&rq->lock, flags);
2567 if (rq->curr->sched_class->post_schedule)
2568 rq->curr->sched_class->post_schedule(rq);
2569 raw_spin_unlock_irqrestore(&rq->lock, flags);
2570
2571 rq->post_schedule = 0;
2572 }
2573 }
2574
2575 #else
2576
2577 static inline void post_schedule(struct rq *rq)
2578 {
2579 }
2580
2581 #endif
2582
2583 /**
2584 * schedule_tail - first thing a freshly forked thread must call.
2585 * @prev: the thread we just switched away from.
2586 */
2587 asmlinkage __visible void schedule_tail(struct task_struct *prev)
2588 __releases(rq->lock)
2589 {
2590 struct rq *rq = this_rq();
2591
2592 finish_task_switch(rq, prev);
2593
2594 /*
2595 * FIXME: do we need to worry about rq being invalidated by the
2596 * task_switch?
2597 */
2598 post_schedule(rq);
2599
2600 if (current->set_child_tid)
2601 put_user(task_pid_vnr(current), current->set_child_tid);
2602 }
2603
2604 /*
2605 * context_switch - switch to the new MM and the new
2606 * thread's register state.
2607 */
2608 static inline void
2609 context_switch(struct rq *rq, struct task_struct *prev,
2610 struct task_struct *next)
2611 {
2612 struct mm_struct *mm, *oldmm;
2613
2614 prepare_task_switch(rq, prev, next);
2615
2616 mm = next->mm;
2617 oldmm = prev->active_mm;
2618 /*
2619 * For paravirt, this is coupled with an exit in switch_to to
2620 * combine the page table reload and the switch backend into
2621 * one hypercall.
2622 */
2623 arch_start_context_switch(prev);
2624
2625 if (!mm) {
2626 next->active_mm = oldmm;
2627 atomic_inc(&oldmm->mm_count);
2628 enter_lazy_tlb(oldmm, next);
2629 } else
2630 switch_mm(oldmm, mm, next);
2631
2632 if (!prev->mm) {
2633 prev->active_mm = NULL;
2634 rq->prev_mm = oldmm;
2635 }
2636 /*
2637 * The runqueue lock will be released by the next
2638 * task (which is an invalid locking op, but in the case
2639 * of the scheduler it's an obvious special case), so we
2640 * do an early lockdep release here:
2641 */
2642 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2643
2644 context_tracking_task_switch(prev, next);
2645 /* Here we just switch the register state and the stack. */
2646 switch_to(prev, next, prev);
2647
2648 barrier();
2649 /*
2650 * this_rq must be evaluated again because prev may have moved
2651 * CPUs since it called schedule(), thus the 'rq' on its stack
2652 * frame will be invalid.
2653 */
2654 finish_task_switch(this_rq(), prev);
2655 }
2656
2657 /*
2658 * nr_running and nr_context_switches:
2659 *
2660 * externally visible scheduler statistics: current number of runnable
2661 * threads, total number of context switches performed since bootup.
2662 */
2663 unsigned long nr_running(void)
2664 {
2665 unsigned long i, sum = 0;
2666
2667 for_each_online_cpu(i)
2668 sum += cpu_rq(i)->nr_running;
2669
2670 return sum;
2671 }
2672
2673 /*
2674 * Check if only the current task is running on the cpu.
2675 *
2676 * Caution: this function does not check that the caller has disabled
2677 * preemption, thus the result might have a time-of-check-to-time-of-use
2678 * race. The caller is responsible for using it correctly, for example:
2679 *
2680 * - from a non-preemptable section (of course)
2681 *
2682 * - from a thread that is bound to a single CPU
2683 *
2684 * - in a loop with very short iterations (e.g. a polling loop)
2685 */
2686 bool single_task_running(void)
2687 {
2688 return raw_rq()->nr_running == 1;
2689 }
2690 EXPORT_SYMBOL(single_task_running);
2691
2692 unsigned long long nr_context_switches(void)
2693 {
2694 int i;
2695 unsigned long long sum = 0;
2696
2697 for_each_possible_cpu(i)
2698 sum += cpu_rq(i)->nr_switches;
2699
2700 return sum;
2701 }
2702
2703 unsigned long nr_iowait(void)
2704 {
2705 unsigned long i, sum = 0;
2706
2707 for_each_possible_cpu(i)
2708 sum += atomic_read(&cpu_rq(i)->nr_iowait);
2709
2710 return sum;
2711 }
2712
2713 unsigned long nr_iowait_cpu(int cpu)
2714 {
2715 struct rq *this = cpu_rq(cpu);
2716 return atomic_read(&this->nr_iowait);
2717 }
2718
2719 #ifdef CONFIG_CPU_QUIET
2720 u64 nr_running_integral(unsigned int cpu)
2721 {
2722 unsigned int seqcnt;
2723 u64 integral;
2724 struct rq *q;
2725
2726 if (cpu >= nr_cpu_ids)
2727 return 0;
2728
2729 q = cpu_rq(cpu);
2730
2731 /*
2732 * Update the average to avoid reading a stale value if there were
2733 * no run-queue changes for a long time. On the other hand, if
2734 * the changes are happening right now, just read the current value
2735 * directly.
2736 */
2737
2738 seqcnt = read_seqcount_begin(&q->ave_seqcnt);
2739 integral = do_nr_running_integral(q);
2740 if (read_seqcount_retry(&q->ave_seqcnt, seqcnt)) {
2741 read_seqcount_begin(&q->ave_seqcnt);
2742 integral = q->nr_running_integral;
2743 }
2744
2745 return integral;
2746 }
2747 #endif
2748
2749 void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
2750 {
2751 struct rq *rq = this_rq();
2752 *nr_waiters = atomic_read(&rq->nr_iowait);
2753 *load = rq->load.weight;
2754 }
2755
2756 #ifdef CONFIG_SMP
2757
2758 /*
2759 * sched_exec - execve() is a valuable balancing opportunity, because at
2760 * this point the task has the smallest effective memory and cache footprint.
2761 */
2762 void sched_exec(void)
2763 {
2764 struct task_struct *p = current;
2765 unsigned long flags;
2766 int dest_cpu;
2767
2768 raw_spin_lock_irqsave(&p->pi_lock, flags);
2769 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
2770 if (dest_cpu == smp_processor_id())
2771 goto unlock;
2772
2773 if (likely(cpu_active(dest_cpu))) {
2774 struct migration_arg arg = { p, dest_cpu };
2775
2776 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2777 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
2778 return;
2779 }
2780 unlock:
2781 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2782 }
2783
2784 #endif
2785
2786 DEFINE_PER_CPU(struct kernel_stat, kstat);
2787 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
2788
2789 EXPORT_PER_CPU_SYMBOL(kstat);
2790 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
2791
2792 /*
2793 * Return accounted runtime for the task.
2794 * In case the task is currently running, return the runtime plus the
2795 * pending runtime that has not been accounted yet.
2796 */
2797 unsigned long long task_sched_runtime(struct task_struct *p)
2798 {
2799 unsigned long flags;
2800 struct rq *rq;
2801 u64 ns;
2802
2803 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
2804 /*
2805 * 64-bit doesn't need locks to atomically read a 64-bit value.
2806 * So we have an optimization opportunity when the task's delta_exec is 0.
2807 * Reading ->on_cpu is racy, but this is ok.
2808 *
2809 * If we race with it leaving cpu, we'll take a lock. So we're correct.
2810 * If we race with it entering cpu, unaccounted time is 0. This is
2811 * indistinguishable from the read occurring a few cycles earlier.
2812 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
2813 * been accounted, so we're correct here as well.
2814 */
2815 if (!p->on_cpu || !task_on_rq_queued(p))
2816 return p->se.sum_exec_runtime;
2817 #endif
2818
2819 rq = task_rq_lock(p, &flags);
2820 /*
2821 * Must be ->curr _and_ ->on_rq. If dequeued, we would
2822 * project cycles that may never be accounted to this
2823 * thread, breaking clock_gettime().
2824 */
2825 if (task_current(rq, p) && task_on_rq_queued(p)) {
2826 update_rq_clock(rq);
2827 p->sched_class->update_curr(rq);
2828 }
2829 ns = p->se.sum_exec_runtime;
2830 task_rq_unlock(rq, p, &flags);
2831
2832 return ns;
2833 }
2834
2835 #ifdef CONFIG_CPU_FREQ_GOV_SCHED
2836
2837 static inline
2838 unsigned long add_capacity_margin(unsigned long cpu_capacity)
2839 {
2840 cpu_capacity = cpu_capacity * capacity_margin;
2841 cpu_capacity /= SCHED_CAPACITY_SCALE;
2842 return cpu_capacity;
2843 }
2844
2845 static inline
2846 unsigned long sum_capacity_reqs(unsigned long cfs_cap,
2847 struct sched_capacity_reqs *scr)
2848 {
2849 unsigned long total = add_capacity_margin(cfs_cap + scr->rt);
2850 return total += scr->dl;
2851 }
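/*
 * Worked example, assuming the common capacity_margin of 1280 (i.e.
 * 1280/1024 == 1.25; its actual value is defined elsewhere): a CFS+RT
 * demand of 800 out of SCHED_CAPACITY_SCALE (1024) becomes
 *
 *   add_capacity_margin(800) == 800 * 1280 / 1024 == 1000,
 *
 * so ~25% headroom is requested on top of the measured utilization
 * before the DL requirement is added on in sum_capacity_reqs().
 */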
2852
2853 static void sched_freq_tick_pelt(int cpu)
2854 {
2855 unsigned long cpu_utilization = capacity_max;
2856 unsigned long capacity_curr = capacity_curr_of(cpu);
2857 struct sched_capacity_reqs *scr;
2858
2859 scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
2860 if (sum_capacity_reqs(cpu_utilization, scr) < capacity_curr)
2861 return;
2862
2863 /*
2864 * To make room for a task that is building up its "real"
2865 * utilization, while harming its performance the least, request
2866 * a jump to a higher OPP as soon as the margin of free capacity
2867 * is impacted (specified by capacity_margin).
2868 */
2869 set_cfs_cpu_capacity(cpu, true, cpu_utilization);
2870 }
2871
2872 #ifdef CONFIG_SCHED_WALT
2873 static void sched_freq_tick_walt(int cpu)
2874 {
2875 unsigned long cpu_utilization = cpu_util(cpu);
2876 unsigned long capacity_curr = capacity_curr_of(cpu);
2877
2878 if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
2879 return sched_freq_tick_pelt(cpu);
2880
2881 /*
2882 * Add a margin to the WALT utilization.
2883 * NOTE: WALT tracks a single CPU signal for all the scheduling
2884 * classes, thus this margin is going to be added to the DL class as
2885 * well, which is something we do not do in the sched_freq_tick_pelt() case.
2886 */
2887 cpu_utilization = add_capacity_margin(cpu_utilization);
2888 if (cpu_utilization <= capacity_curr)
2889 return;
2890
2891 /*
2892 * It is likely that the load is growing so we
2893 * keep the added margin in our request as an
2894 * extra boost.
2895 */
2896 set_cfs_cpu_capacity(cpu, true, cpu_utilization);
2897
2898 }
2899 #define _sched_freq_tick(cpu) sched_freq_tick_walt(cpu)
2900 #else
2901 #define _sched_freq_tick(cpu) sched_freq_tick_pelt(cpu)
2902 #endif /* CONFIG_SCHED_WALT */
2903
2904 static void sched_freq_tick(int cpu)
2905 {
2906 unsigned long capacity_orig, capacity_curr;
2907
2908 if (!sched_freq())
2909 return;
2910
2911 capacity_orig = capacity_orig_of(cpu);
2912 capacity_curr = capacity_curr_of(cpu);
2913 if (capacity_curr == capacity_orig)
2914 return;
2915
2916 _sched_freq_tick(cpu);
2917 }
2918 #else
2919 static inline void sched_freq_tick(int cpu) { }
2920 #endif /* CONFIG_CPU_FREQ_GOV_SCHED */
2921
2922 /*
2923 * This function gets called by the timer code, with HZ frequency.
2924 * We call it with interrupts disabled.
2925 */
2926 void scheduler_tick(void)
2927 {
2928 int cpu = smp_processor_id();
2929 struct rq *rq = cpu_rq(cpu);
2930 struct task_struct *curr = rq->curr;
2931
2932 sched_clock_tick();
2933
2934 raw_spin_lock(&rq->lock);
2935 walt_set_window_start(rq);
2936 update_rq_clock(rq);
2937 curr->sched_class->task_tick(rq, curr, 0);
2938 update_cpu_load_active(rq);
2939 walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
2940 walt_ktime_clock(), 0);
2941 calc_global_load_tick(rq);
2942 sched_freq_tick(cpu);
2943 raw_spin_unlock(&rq->lock);
2944
2945 perf_event_task_tick();
2946
2947 #ifdef CONFIG_SMP
2948 rq->idle_balance = idle_cpu(cpu);
2949 trigger_load_balance(rq);
2950 #endif
2951 rq_last_tick_reset(rq);
2952 }
2953
2954 #ifdef CONFIG_NO_HZ_FULL
2955 /**
2956 * scheduler_tick_max_deferment
2957 *
2958 * Keep at least one tick per second when a single
2959 * active task is running because the scheduler doesn't
2960 * yet completely support a full dynticks environment.
2961 *
2962 * This makes sure that uptime, CFS vruntime, load
2963 * balancing, etc... continue to move forward, even
2964 * with a very low granularity.
2965 *
2966 * Return: Maximum deferment in nanoseconds.
2967 */
2968 u64 scheduler_tick_max_deferment(void)
2969 {
2970 struct rq *rq = this_rq();
2971 unsigned long next, now = READ_ONCE(jiffies);
2972
2973 next = rq->last_sched_tick + HZ;
2974
2975 if (time_before_eq(next, now))
2976 return 0;
2977
2978 return jiffies_to_nsecs(next - now);
2979 }
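/*
 * E.g. with HZ == 1000 and the last tick 600 jiffies ago:
 * next == rq->last_sched_tick + 1000, so the tick may be deferred
 * by at most another 400 jiffies == 400ms == 400000000ns.
 */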
2980 #endif
2981
2982 notrace unsigned long get_parent_ip(unsigned long addr)
2983 {
2984 if (in_lock_functions(addr)) {
2985 addr = CALLER_ADDR2;
2986 if (in_lock_functions(addr))
2987 addr = CALLER_ADDR3;
2988 }
2989 return addr;
2990 }
2991
2992 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2993 defined(CONFIG_PREEMPT_TRACER))
2994
2995 void preempt_count_add(int val)
2996 {
2997 #ifdef CONFIG_DEBUG_PREEMPT
2998 /*
2999 * Underflow?
3000 */
3001 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3002 return;
3003 #endif
3004 __preempt_count_add(val);
3005 #ifdef CONFIG_DEBUG_PREEMPT
3006 /*
3007 * Spinlock count overflowing soon?
3008 */
3009 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3010 PREEMPT_MASK - 10);
3011 #endif
3012 if (preempt_count() == val) {
3013 unsigned long ip = get_parent_ip(CALLER_ADDR1);
3014 #ifdef CONFIG_DEBUG_PREEMPT
3015 current->preempt_disable_ip = ip;
3016 #endif
3017 trace_preempt_off(CALLER_ADDR0, ip);
3018 }
3019 }
3020 EXPORT_SYMBOL(preempt_count_add);
3021 NOKPROBE_SYMBOL(preempt_count_add);
3022
3023 void preempt_count_sub(int val)
3024 {
3025 #ifdef CONFIG_DEBUG_PREEMPT
3026 /*
3027 * Underflow?
3028 */
3029 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
3030 return;
3031 /*
3032 * Is the spinlock portion underflowing?
3033 */
3034 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3035 !(preempt_count() & PREEMPT_MASK)))
3036 return;
3037 #endif
3038
3039 if (preempt_count() == val)
3040 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
3041 __preempt_count_sub(val);
3042 }
3043 EXPORT_SYMBOL(preempt_count_sub);
3044 NOKPROBE_SYMBOL(preempt_count_sub);
3045
3046 #endif
3047
3048 /*
3049 * Print scheduling while atomic bug:
3050 */
3051 static noinline void __schedule_bug(struct task_struct *prev)
3052 {
3053 if (oops_in_progress)
3054 return;
3055
3056 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3057 prev->comm, prev->pid, preempt_count());
3058
3059 debug_show_held_locks(prev);
3060 print_modules();
3061 if (irqs_disabled())
3062 print_irqtrace_events(prev);
3063 #ifdef CONFIG_DEBUG_PREEMPT
3064 if (in_atomic_preempt_off()) {
3065 pr_err("Preemption disabled at:");
3066 print_ip_sym(current->preempt_disable_ip);
3067 pr_cont("\n");
3068 }
3069 #endif
3070 dump_stack();
3071 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
3072 }
3073
3074 /*
3075 * Various schedule()-time debugging checks and statistics:
3076 */
3077 static inline void schedule_debug(struct task_struct *prev)
3078 {
3079 #ifdef CONFIG_SCHED_STACK_END_CHECK
3080 if (task_stack_end_corrupted(prev))
3081 panic("corrupted stack end detected inside scheduler\n");
3082 #endif
3083 /*
3084 * Test if we are atomic. Since do_exit() needs to call into
3085 * schedule() atomically, we ignore that path. Otherwise whine
3086 * if we are scheduling when we should not.
3087 */
3088 if (unlikely(in_atomic_preempt_off() && prev->state != TASK_DEAD))
3089 __schedule_bug(prev);
3090 rcu_sleep_check();
3091
3092 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3093
3094 schedstat_inc(this_rq(), sched_count);
3095 }
3096
3097 /*
3098 * Pick up the highest-prio task:
3099 */
3100 static inline struct task_struct *
3101 pick_next_task(struct rq *rq, struct task_struct *prev)
3102 {
3103 const struct sched_class *class = &fair_sched_class;
3104 struct task_struct *p;
3105
3106 /*
3107 * Optimization: we know that if all tasks are in
3108 * the fair class we can call that function directly:
3109 */
3110 if (likely(prev->sched_class == class &&
3111 rq->nr_running == rq->cfs.h_nr_running)) {
3112 p = fair_sched_class.pick_next_task(rq, prev);
3113 if (unlikely(p == RETRY_TASK))
3114 goto again;
3115
3116 /* assumes fair_sched_class->next == idle_sched_class */
3117 if (unlikely(!p))
3118 p = idle_sched_class.pick_next_task(rq, prev);
3119
3120 return p;
3121 }
3122
3123 again:
3124 for_each_class(class) {
3125 p = class->pick_next_task(rq, prev);
3126 if (p) {
3127 if (unlikely(p == RETRY_TASK))
3128 goto again;
3129 return p;
3130 }
3131 }
3132
3133 BUG(); /* the idle class will always have a runnable task */
3134 }
3135
3136 /*
3137 * __schedule() is the main scheduler function.
3138 *
3139 * The main means of driving the scheduler and thus entering this function are:
3140 *
3141 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
3142 *
3143 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
3144 * paths. For example, see arch/x86/entry_64.S.
3145 *
3146 * To drive preemption between tasks, the scheduler sets the flag in timer
3147 * interrupt handler scheduler_tick().
3148 *
3149 * 3. Wakeups don't really cause entry into schedule(). They add a
3150 * task to the run-queue and that's it.
3151 *
3152 * Now, if the new task added to the run-queue preempts the current
3153 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
3154 * called on the nearest possible occasion:
3155 *
3156 * - If the kernel is preemptible (CONFIG_PREEMPT=y):
3157 *
3158 * - in syscall or exception context, at the next outermost
3159 * preempt_enable(). (this might be as soon as the wake_up()'s
3160 * spin_unlock()!)
3161 *
3162 * - in IRQ context, return from interrupt-handler to
3163 * preemptible context
3164 *
3165 * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
3166 * then at the next:
3167 *
3168 * - cond_resched() call
3169 * - explicit schedule() call
3170 * - return from syscall or exception to user-space
3171 * - return from interrupt-handler to user-space
3172 */
3173 static void __sched __schedule(void)
3174 {
3175 struct task_struct *prev, *next;
3176 unsigned long *switch_count;
3177 struct rq *rq;
3178 int cpu;
3179 u64 wallclock;
3180
3181 need_resched:
3182 preempt_disable();
3183 cpu = smp_processor_id();
3184 rq = cpu_rq(cpu);
3185 rcu_note_context_switch(cpu);
3186 prev = rq->curr;
3187
3188 schedule_debug(prev);
3189
3190 if (sched_feat(HRTICK))
3191 hrtick_clear(rq);
3192
3193 /*
3194 * Make sure that signal_pending_state()->signal_pending() below
3195 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
3196 * done by the caller to avoid the race with signal_wake_up().
3197 */
3198 smp_mb__before_spinlock();
3199 raw_spin_lock_irq(&rq->lock);
3200
3201 switch_count = &prev->nivcsw;
3202 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
3203 if (unlikely(signal_pending_state(prev->state, prev))) {
3204 prev->state = TASK_RUNNING;
3205 } else {
3206 deactivate_task(rq, prev, DEQUEUE_SLEEP);
3207 prev->on_rq = 0;
3208
3209 /*
3210 * If a worker went to sleep, notify and ask workqueue
3211 * whether it wants to wake up a task to maintain
3212 * concurrency.
3213 */
3214 if (prev->flags & PF_WQ_WORKER) {
3215 struct task_struct *to_wakeup;
3216
3217 to_wakeup = wq_worker_sleeping(prev, cpu);
3218 if (to_wakeup)
3219 try_to_wake_up_local(to_wakeup);
3220 }
3221 }
3222 switch_count = &prev->nvcsw;
3223 }
3224
3225 if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
3226 update_rq_clock(rq);
3227
3228 next = pick_next_task(rq, prev);
3229 wallclock = walt_ktime_clock();
3230 walt_update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
3231 walt_update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
3232 clear_tsk_need_resched(prev);
3233 clear_preempt_need_resched();
3234 rq->skip_clock_update = 0;
3235
3236 if (likely(prev != next)) {
3237 rq->nr_switches++;
3238 rq->curr = next;
3239 ++*switch_count;
3240
3241 context_switch(rq, prev, next); /* unlocks the rq */
3242 /*
3243 * The context switch has flipped the stack from under us
3244 * and restored the local variables which were saved when
3245 * this task called schedule() in the past. prev == current
3246 * is still correct, but it may have moved to another cpu/rq.
3247 */
3248 cpu = smp_processor_id();
3249 rq = cpu_rq(cpu);
3250 } else
3251 raw_spin_unlock_irq(&rq->lock);
3252
3253 post_schedule(rq);
3254
3255 sched_preempt_enable_no_resched();
3256 if (need_resched())
3257 goto need_resched;
3258 }
3259
3260 static inline void sched_submit_work(struct task_struct *tsk)
3261 {
3262 if (!tsk->state || tsk_is_pi_blocked(tsk))
3263 return;
3264 /*
3265 * If we are going to sleep and we have plugged IO queued,
3266 * make sure to submit it to avoid deadlocks.
3267 */
3268 if (blk_needs_flush_plug(tsk))
3269 blk_schedule_flush_plug(tsk);
3270 }
3271
3272 asmlinkage __visible void __sched schedule(void)
3273 {
3274 struct task_struct *tsk = current;
3275
3276 sched_submit_work(tsk);
3277 __schedule();
3278 }
3279 EXPORT_SYMBOL(schedule);
3280
3281 #ifdef CONFIG_CONTEXT_TRACKING
3282 asmlinkage __visible void __sched schedule_user(void)
3283 {
3284 /*
3285 * If we come here after a random call to set_need_resched(),
3286 * or we have been woken up remotely but the IPI has not yet arrived,
3287 * we haven't yet exited the RCU idle mode. Do it here manually until
3288 * we find a better solution.
3289 *
3290 * NB: There are buggy callers of this function. Ideally we
3291 * should warn if prev_state != IN_USER, but that will trigger
3292 * too frequently to make sense yet.
3293 */
3294 enum ctx_state prev_state = exception_enter();
3295 schedule();
3296 exception_exit(prev_state);
3297 }
3298 #endif
3299
3300 /**
3301 * schedule_preempt_disabled - called with preemption disabled
3302 *
3303 * Returns with preemption disabled. Note: preempt_count must be 1
3304 */
3305 void __sched schedule_preempt_disabled(void)
3306 {
3307 sched_preempt_enable_no_resched();
3308 schedule();
3309 preempt_disable();
3310 }
3311
3312 #ifdef CONFIG_PREEMPT
3313 /*
3314 * This is the entry point to schedule() from in-kernel preemption
3315 * off the back of preempt_enable(). Kernel preemption off the return-
3316 * from-interrupt path goes through preempt_schedule_irq() instead.
3317 */
3318 asmlinkage __visible void __sched notrace preempt_schedule(void)
3319 {
3320 /*
3321 * If there is a non-zero preempt_count or interrupts are disabled,
3322 * we do not want to preempt the current task. Just return.
3323 */
3324 if (likely(!preemptible()))
3325 return;
3326
3327 do {
3328 __preempt_count_add(PREEMPT_ACTIVE);
3329 __schedule();
3330 __preempt_count_sub(PREEMPT_ACTIVE);
3331
3332 /*
3333 * Check again in case we missed a preemption opportunity
3334 * between schedule and now.
3335 */
3336 barrier();
3337 } while (need_resched());
3338 }
3339 NOKPROBE_SYMBOL(preempt_schedule);
3340 EXPORT_SYMBOL(preempt_schedule);
3341
3342 #ifdef CONFIG_CONTEXT_TRACKING
3343 /**
3344 * preempt_schedule_context - preempt_schedule called by tracing
3345 *
3346 * The tracing infrastructure uses preempt_enable_notrace to prevent
3347 * recursion and tracing preempt enabling caused by the tracing
3348 * infrastructure itself. But as tracing can happen in areas coming
3349 * from userspace or just about to enter userspace, a preempt enable
3350 * can occur before user_exit() is called. This will cause the scheduler
3351 * to be called when the system is still in usermode.
3352 *
3353 * To prevent this, the preempt_enable_notrace will use this function
3354 * instead of preempt_schedule() to exit user context if needed before
3355 * calling the scheduler.
3356 */
3357 asmlinkage __visible void __sched notrace preempt_schedule_context(void)
3358 {
3359 enum ctx_state prev_ctx;
3360
3361 if (likely(!preemptible()))
3362 return;
3363
3364 do {
3365 __preempt_count_add(PREEMPT_ACTIVE);
3366 /*
3367 * Needs preempt disabled in case user_exit() is traced
3368 * and the tracer calls preempt_enable_notrace() causing
3369 * an infinite recursion.
3370 */
3371 prev_ctx = exception_enter();
3372 __schedule();
3373 exception_exit(prev_ctx);
3374
3375 __preempt_count_sub(PREEMPT_ACTIVE);
3376 barrier();
3377 } while (need_resched());
3378 }
3379 EXPORT_SYMBOL_GPL(preempt_schedule_context);
3380 #endif /* CONFIG_CONTEXT_TRACKING */
3381
3382 #endif /* CONFIG_PREEMPT */
3383
3384 /*
3385 * This is the entry point to schedule() from kernel preemption
3386 * off of irq context.
3387 * Note that this is called and returns with irqs disabled. This
3388 * protects us against recursive calls from irq context.
3389 */
3390 asmlinkage __visible void __sched preempt_schedule_irq(void)
3391 {
3392 enum ctx_state prev_state;
3393
3394 /* Catch callers which need to be fixed */
3395 BUG_ON(preempt_count() || !irqs_disabled());
3396
3397 prev_state = exception_enter();
3398
3399 do {
3400 __preempt_count_add(PREEMPT_ACTIVE);
3401 local_irq_enable();
3402 __schedule();
3403 local_irq_disable();
3404 __preempt_count_sub(PREEMPT_ACTIVE);
3405
3406 /*
3407 * Check again in case we missed a preemption opportunity
3408 * between schedule and now.
3409 */
3410 barrier();
3411 } while (need_resched());
3412
3413 exception_exit(prev_state);
3414 }
3415
3416 int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
3417 void *key)
3418 {
3419 return try_to_wake_up(curr->private, mode, wake_flags);
3420 }
3421 EXPORT_SYMBOL(default_wake_function);
3422
3423 #ifdef CONFIG_RT_MUTEXES
3424
3425 /*
3426 * rt_mutex_setprio - set the current priority of a task
3427 * @p: task
3428 * @prio: prio value (kernel-internal form)
3429 *
3430 * This function changes the 'effective' priority of a task. It does
3431 * not touch ->normal_prio like __setscheduler().
3432 *
3433 * Used by the rt_mutex code to implement priority inheritance
3434 * logic. Call site only calls if the priority of the task changed.
3435 */
3436 void rt_mutex_setprio(struct task_struct *p, int prio)
3437 {
3438 int oldprio, queued, running, enqueue_flag = 0;
3439 struct rq *rq;
3440 const struct sched_class *prev_class;
3441
3442 BUG_ON(prio > MAX_PRIO);
3443
3444 rq = __task_rq_lock(p);
3445
3446 /*
3447 * Boosting the idle task is a no-no in general. There is one
3448 * exception, when PREEMPT_RT and NOHZ are active:
3449 *
3450 * The idle task calls get_next_timer_interrupt() and holds
3451 * the timer wheel base->lock on the CPU and another CPU wants
3452 * to access the timer (probably to cancel it). We can safely
3453 * ignore the boosting request, as the idle CPU runs this code
3454 * with interrupts disabled and will complete the lock
3455 * protected section without being interrupted. So there is no
3456 * real need to boost.
3457 */
3458 if (unlikely(p == rq->idle)) {
3459 WARN_ON(p != rq->curr);
3460 WARN_ON(p->pi_blocked_on);
3461 goto out_unlock;
3462 }
3463
3464 trace_sched_pi_setprio(p, prio);
3465 oldprio = p->prio;
3466 prev_class = p->sched_class;
3467 queued = task_on_rq_queued(p);
3468 running = task_current(rq, p);
3469 if (queued)
3470 dequeue_task(rq, p, 0);
3471 if (running)
3472 put_prev_task(rq, p);
3473
3474 /*
3475 * Boosting conditions are:
3476 * 1. -rt task is running and holds mutex A
3477 * --> -dl task blocks on mutex A
3478 *
3479 * 2. -dl task is running and holds mutex A
3480 * --> -dl task blocks on mutex A and could preempt the
3481 * running task
3482 */
3483 if (dl_prio(prio)) {
3484 struct task_struct *pi_task = rt_mutex_get_top_task(p);
3485 if (!dl_prio(p->normal_prio) ||
3486 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
3487 p->dl.dl_boosted = 1;
3488 p->dl.dl_throttled = 0;
3489 enqueue_flag = ENQUEUE_REPLENISH;
3490 } else
3491 p->dl.dl_boosted = 0;
3492 p->sched_class = &dl_sched_class;
3493 } else if (rt_prio(prio)) {
3494 if (dl_prio(oldprio))
3495 p->dl.dl_boosted = 0;
3496 if (oldprio < prio)
3497 enqueue_flag = ENQUEUE_HEAD;
3498 p->sched_class = &rt_sched_class;
3499 } else {
3500 if (dl_prio(oldprio))
3501 p->dl.dl_boosted = 0;
3502 if (rt_prio(oldprio))
3503 p->rt.timeout = 0;
3504 p->sched_class = &fair_sched_class;
3505 }
3506
3507 p->prio = prio;
3508
3509 if (running)
3510 p->sched_class->set_curr_task(rq);
3511 if (queued)
3512 enqueue_task(rq, p, enqueue_flag);
3513
3514 check_class_changed(rq, p, prev_class, oldprio);
3515 out_unlock:
3516 __task_rq_unlock(rq);
3517 }
3518 #endif
3519
3520 void set_user_nice(struct task_struct *p, long nice)
3521 {
3522 int old_prio, delta, queued;
3523 unsigned long flags;
3524 struct rq *rq;
3525
3526 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
3527 return;
3528 /*
3529 * We have to be careful, if called from sys_setpriority(),
3530 * the task might be in the middle of scheduling on another CPU.
3531 */
3532 rq = task_rq_lock(p, &flags);
3533 /*
3534 * The RT priorities are set via sched_setscheduler(), but we still
3535 * allow the 'normal' nice value to be set; as expected, though,
3536 * it won't have any effect on scheduling while the task's policy is
3537 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
3538 */
3539 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
3540 p->static_prio = NICE_TO_PRIO(nice);
3541 goto out_unlock;
3542 }
3543 queued = task_on_rq_queued(p);
3544 if (queued)
3545 dequeue_task(rq, p, 0);
3546
3547 p->static_prio = NICE_TO_PRIO(nice);
3548 set_load_weight(p);
3549 old_prio = p->prio;
3550 p->prio = effective_prio(p);
3551 delta = p->prio - old_prio;
3552
3553 if (queued) {
3554 enqueue_task(rq, p, 0);
3555 /*
3556 * If the task increased its priority or is running and
3557 * lowered its priority, then reschedule its CPU:
3558 */
3559 if (delta < 0 || (delta > 0 && task_running(rq, p)))
3560 resched_curr(rq);
3561 }
3562 out_unlock:
3563 task_rq_unlock(rq, p, &flags);
3564 }
3565 EXPORT_SYMBOL(set_user_nice);
3566
3567 /*
3568 * can_nice - check if a task can reduce its nice value
3569 * @p: task
3570 * @nice: nice value
3571 */
3572 int can_nice(const struct task_struct *p, const int nice)
3573 {
3574 /* convert nice value [19,-20] to rlimit style value [1,40] */
3575 int nice_rlim = nice_to_rlimit(nice);
3576
3577 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
3578 capable(CAP_SYS_NICE));
3579 }
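/*
 * Example: nice_to_rlimit() maps nice [19..-20] onto [1..40], so
 * raising priority to nice -5 requires
 *
 *   nice_to_rlimit(-5) == 25 <= task_rlimit(p, RLIMIT_NICE)
 *
 * (or CAP_SYS_NICE). With the default RLIMIT_NICE of 0, an
 * unprivileged task can never lower its nice value.
 */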
3580
3581 #ifdef __ARCH_WANT_SYS_NICE
3582
3583 /*
3584 * sys_nice - change the priority of the current process.
3585 * @increment: priority increment
3586 *
3587 * sys_setpriority is a more generic, but much slower function that
3588 * does similar things.
3589 */
3590 SYSCALL_DEFINE1(nice, int, increment)
3591 {
3592 long nice, retval;
3593
3594 /*
3595 * Setpriority might change our priority at the same moment.
3596 * We don't have to worry. Conceptually one call occurs first
3597 * and we have a single winner.
3598 */
3599 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
3600 nice = task_nice(current) + increment;
3601
3602 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
3603 if (increment < 0 && !can_nice(current, nice))
3604 return -EPERM;
3605
3606 retval = security_task_setnice(current, nice);
3607 if (retval)
3608 return retval;
3609
3610 set_user_nice(current, nice);
3611 return 0;
3612 }
3613
3614 #endif
3615
3616 /**
3617 * task_prio - return the priority value of a given task.
3618 * @p: the task in question.
3619 *
3620 * Return: The priority value as seen by users in /proc.
3621 * RT tasks are offset by -200. Normal tasks are centered
3622 * around 0, value goes from -16 to +15.
3623 */
3624 int task_prio(const struct task_struct *p)
3625 {
3626 return p->prio - MAX_RT_PRIO;
3627 }
3628
3629 /**
3630 * idle_cpu - is a given cpu idle currently?
3631 * @cpu: the processor in question.
3632 *
3633 * Return: 1 if the CPU is currently idle. 0 otherwise.
3634 */
3635 int idle_cpu(int cpu)
3636 {
3637 struct rq *rq = cpu_rq(cpu);
3638
3639 if (rq->curr != rq->idle)
3640 return 0;
3641
3642 if (rq->nr_running)
3643 return 0;
3644
3645 #ifdef CONFIG_SMP
3646 if (!llist_empty(&rq->wake_list))
3647 return 0;
3648 #endif
3649
3650 return 1;
3651 }
3652
3653 /**
3654 * idle_task - return the idle task for a given cpu.
3655 * @cpu: the processor in question.
3656 *
3657 * Return: The idle task for the cpu @cpu.
3658 */
3659 struct task_struct *idle_task(int cpu)
3660 {
3661 return cpu_rq(cpu)->idle;
3662 }
3663
3664 /**
3665 * find_process_by_pid - find a process with a matching PID value.
3666 * @pid: the pid in question.
3667 *
3668 * Return: The task of @pid, if found. %NULL otherwise.
3669 */
3670 static struct task_struct *find_process_by_pid(pid_t pid)
3671 {
3672 return pid ? find_task_by_vpid(pid) : current;
3673 }
3674
3675 /*
3676 * This function initializes the sched_dl_entity of a task that is
3677 * becoming SCHED_DEADLINE.
3678 *
3679 * Only the static values are considered here, the actual runtime and the
3680 * absolute deadline will be properly calculated when the task is enqueued
3681 * for the first time with its new policy.
3682 */
3683 static void
3684 __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3685 {
3686 struct sched_dl_entity *dl_se = &p->dl;
3687
3688 dl_se->dl_runtime = attr->sched_runtime;
3689 dl_se->dl_deadline = attr->sched_deadline;
3690 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
3691 dl_se->flags = attr->sched_flags;
3692 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3693
3694 /*
3695 * Changing the parameters of a task is 'tricky' and we're not doing
3696 * the correct thing -- also see task_dead_dl() and switched_from_dl().
3697 *
3698 * What we SHOULD do is delay the bandwidth release until the 0-lag
3699 * point. This would include retaining the task_struct until that time
3700 * and change dl_overflow() to not immediately decrement the current
3701 * amount.
3702 *
3703 * Instead we retain the current runtime/deadline and let the new
3704 * parameters take effect after the current reservation period lapses.
3705 * This is safe (albeit pessimistic) because the 0-lag point is always
3706 * before the current scheduling deadline.
3707 *
3708 * We can still have temporary overloads because we do not delay the
3709 * change in bandwidth until that time; so admission control is
3710 * not on the safe side. It does however guarantee tasks will never
3711 * consume more than promised.
3712 */
3713 }
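/*
 * Worked example: sched_runtime = 10ms, sched_deadline = 15ms and
 * sched_period = 0 yields dl_period = 15ms and
 *
 *   dl_bw = to_ratio(15000000, 10000000) ~= 0.67 << 20,
 *
 * i.e. the task reserves roughly two thirds of one CPU.
 */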
3714
3715 /*
3716 * sched_setparam() passes in -1 for its policy, to let the functions
3717 * it calls know not to change it.
3718 */
3719 #define SETPARAM_POLICY -1
3720
3721 static void __setscheduler_params(struct task_struct *p,
3722 const struct sched_attr *attr)
3723 {
3724 int policy = attr->sched_policy;
3725
3726 if (policy == SETPARAM_POLICY)
3727 policy = p->policy;
3728
3729 p->policy = policy;
3730
3731 if (dl_policy(policy))
3732 __setparam_dl(p, attr);
3733 else if (fair_policy(policy))
3734 p->static_prio = NICE_TO_PRIO(attr->sched_nice);
3735
3736 /*
3737 * __sched_setscheduler() ensures attr->sched_priority == 0 when
3738 * !rt_policy. Always setting this ensures that things like
3739 * getparam()/getattr() don't report silly values for !rt tasks.
3740 */
3741 p->rt_priority = attr->sched_priority;
3742 p->normal_prio = normal_prio(p);
3743 set_load_weight(p);
3744 }
3745
3746 /* Actually do priority change: must hold pi & rq lock. */
3747 static void __setscheduler(struct rq *rq, struct task_struct *p,
3748 const struct sched_attr *attr, bool keep_boost)
3749 {
3750 __setscheduler_params(p, attr);
3751
3752 /*
3753 * Keep a potential priority boost if called from
3754 * sched_setscheduler().
3755 */
3756 if (keep_boost)
3757 p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
3758 else
3759 p->prio = normal_prio(p);
3760
3761 if (dl_prio(p->prio))
3762 p->sched_class = &dl_sched_class;
3763 else if (rt_prio(p->prio))
3764 p->sched_class = &rt_sched_class;
3765 else
3766 p->sched_class = &fair_sched_class;
3767 }
3768
3769 static void
3770 __getparam_dl(struct task_struct *p, struct sched_attr *attr)
3771 {
3772 struct sched_dl_entity *dl_se = &p->dl;
3773
3774 attr->sched_priority = p->rt_priority;
3775 attr->sched_runtime = dl_se->dl_runtime;
3776 attr->sched_deadline = dl_se->dl_deadline;
3777 attr->sched_period = dl_se->dl_period;
3778 attr->sched_flags = dl_se->flags;
3779 }
3780
3781 /*
3782 * This function validates the new parameters of a -deadline task.
3783 * We require the deadline to be non-zero and greater than or equal
3784 * to the runtime, and the period to be either zero or greater than
3785 * or equal to the deadline. Furthermore, we have to be sure that
3786 * user parameters are above the internal resolution of 1us (we
3787 * check sched_runtime only, since it is always the smallest of the
3788 * three) and below 2^63 ns (we have to check both sched_deadline and
3789 * sched_period, as the latter can be zero).
3790 */
3791 static bool
3792 __checkparam_dl(const struct sched_attr *attr)
3793 {
3794 /* deadline != 0 */
3795 if (attr->sched_deadline == 0)
3796 return false;
3797
3798 /*
3799 * Since we truncate DL_SCALE bits, make sure we're at least
3800 * that big.
3801 */
3802 if (attr->sched_runtime < (1ULL << DL_SCALE))
3803 return false;
3804
3805 /*
3806 * Since we use the MSB for wrap-around and sign issues, make
3807 * sure it's not set (mind that period can be equal to zero).
3808 */
3809 if (attr->sched_deadline & (1ULL << 63) ||
3810 attr->sched_period & (1ULL << 63))
3811 return false;
3812
3813 /* runtime <= deadline <= period (if period != 0) */
3814 if ((attr->sched_period != 0 &&
3815 attr->sched_period < attr->sched_deadline) ||
3816 attr->sched_deadline < attr->sched_runtime)
3817 return false;
3818
3819 return true;
3820 }
3821
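/*
 * Editorial example (not part of the original source): how the checks
 * in __checkparam_dl() above play out for a few illustrative triples.
 *
 *	runtime =   5ms, deadline = 10ms, period = 100ms -> accepted
 *	runtime =  10ms, deadline =  5ms, period = 100ms -> rejected
 *		(deadline < runtime)
 *	runtime = 500ns, deadline = 10ms, period =    0  -> rejected
 *		(runtime below the 1us/DL_SCALE resolution)
 */
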
3822 /*
3823 * check the target process has a UID that matches the current process's
3824 */
3825 static bool check_same_owner(struct task_struct *p)
3826 {
3827 const struct cred *cred = current_cred(), *pcred;
3828 bool match;
3829
3830 rcu_read_lock();
3831 pcred = __task_cred(p);
3832 match = (uid_eq(cred->euid, pcred->euid) ||
3833 uid_eq(cred->euid, pcred->uid));
3834 rcu_read_unlock();
3835 return match;
3836 }
3837
3838 static int __sched_setscheduler(struct task_struct *p,
3839 const struct sched_attr *attr,
3840 bool user)
3841 {
3842 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
3843 MAX_RT_PRIO - 1 - attr->sched_priority;
3844 int retval, oldprio, oldpolicy = -1, queued, running;
3845 int new_effective_prio, policy = attr->sched_policy;
3846 unsigned long flags;
3847 const struct sched_class *prev_class;
3848 struct rq *rq;
3849 int reset_on_fork;
3850
3851 /* may grab non-irq protected spin_locks */
3852 BUG_ON(in_interrupt());
3853 recheck:
3854 /* double check policy once rq lock held */
3855 if (policy < 0) {
3856 reset_on_fork = p->sched_reset_on_fork;
3857 policy = oldpolicy = p->policy;
3858 } else {
3859 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
3860
3861 if (policy != SCHED_DEADLINE &&
3862 policy != SCHED_FIFO && policy != SCHED_RR &&
3863 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
3864 policy != SCHED_IDLE)
3865 return -EINVAL;
3866 }
3867
3868 if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK))
3869 return -EINVAL;
3870
3871 /*
3872 * Valid priorities for SCHED_FIFO and SCHED_RR are
3873 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
3874 * SCHED_BATCH and SCHED_IDLE is 0.
3875 */
3876 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
3877 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
3878 return -EINVAL;
3879 if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
3880 (rt_policy(policy) != (attr->sched_priority != 0)))
3881 return -EINVAL;
3882
3883 /*
3884 * Allow unprivileged RT tasks to decrease priority:
3885 */
3886 if (user && !capable(CAP_SYS_NICE)) {
3887 if (fair_policy(policy)) {
3888 if (attr->sched_nice < task_nice(p) &&
3889 !can_nice(p, attr->sched_nice))
3890 return -EPERM;
3891 }
3892
3893 if (rt_policy(policy)) {
3894 unsigned long rlim_rtprio =
3895 task_rlimit(p, RLIMIT_RTPRIO);
3896
3897 /* can't set/change the rt policy */
3898 if (policy != p->policy && !rlim_rtprio)
3899 return -EPERM;
3900
3901 /* can't increase priority */
3902 if (attr->sched_priority > p->rt_priority &&
3903 attr->sched_priority > rlim_rtprio)
3904 return -EPERM;
3905 }
3906
3907 /*
3908 * Can't set/change SCHED_DEADLINE policy at all for now
3909 * (safest behavior); in the future we would like to allow
3910 * unprivileged DL tasks to increase their relative deadline
3911 * or reduce their runtime (both ways reducing utilization)
3912 */
3913 if (dl_policy(policy))
3914 return -EPERM;
3915
3916 /*
3917 * Treat SCHED_IDLE as nice 20. Only allow a switch to
3918 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
3919 */
3920 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
3921 if (!can_nice(p, task_nice(p)))
3922 return -EPERM;
3923 }
3924
3925 /* can't change other user's priorities */
3926 if (!check_same_owner(p))
3927 return -EPERM;
3928
3929 /* Normal users shall not reset the sched_reset_on_fork flag */
3930 if (p->sched_reset_on_fork && !reset_on_fork)
3931 return -EPERM;
3932 }
3933
3934 if (user) {
3935 retval = security_task_setscheduler(p);
3936 if (retval)
3937 return retval;
3938 }
3939
3940 /*
3941 * make sure no PI-waiters arrive (or leave) while we are
3942 * changing the priority of the task:
3943 *
3944 * To be able to change p->policy safely, the appropriate
3945 * runqueue lock must be held.
3946 */
3947 rq = task_rq_lock(p, &flags);
3948
3949 /*
3950 * Changing the policy of the stop threads is a very bad idea.
3951 */
3952 if (p == rq->stop) {
3953 task_rq_unlock(rq, p, &flags);
3954 return -EINVAL;
3955 }
3956
3957 /*
3958 * If not changing anything there's no need to proceed further,
3959 * but store a possible modification of reset_on_fork.
3960 */
3961 if (unlikely(policy == p->policy)) {
3962 if (fair_policy(policy) && attr->sched_nice != task_nice(p))
3963 goto change;
3964 if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
3965 goto change;
3966 if (dl_policy(policy))
3967 goto change;
3968
3969 p->sched_reset_on_fork = reset_on_fork;
3970 task_rq_unlock(rq, p, &flags);
3971 return 0;
3972 }
3973 change:
3974
3975 if (user) {
3976 #ifdef CONFIG_RT_GROUP_SCHED
3977 /*
3978 * Do not allow realtime tasks into groups that have no runtime
3979 * assigned.
3980 */
3981 if (rt_bandwidth_enabled() && rt_policy(policy) &&
3982 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
3983 !task_group_is_autogroup(task_group(p))) {
3984 task_rq_unlock(rq, p, &flags);
3985 return -EPERM;
3986 }
3987 #endif
3988 #ifdef CONFIG_SMP
3989 if (dl_bandwidth_enabled() && dl_policy(policy)) {
3990 cpumask_t *span = rq->rd->span;
3991
3992 /*
3993 * Don't allow tasks with an affinity mask smaller than
3994 * the entire root_domain to become SCHED_DEADLINE. We
3995 * will also fail if there's no bandwidth available.
3996 */
3997 if (!cpumask_subset(span, &p->cpus_allowed) ||
3998 rq->rd->dl_bw.bw == 0) {
3999 task_rq_unlock(rq, p, &flags);
4000 return -EPERM;
4001 }
4002 }
4003 #endif
4004 }
4005
4006 /* recheck policy now with rq lock held */
4007 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4008 policy = oldpolicy = -1;
4009 task_rq_unlock(rq, p, &flags);
4010 goto recheck;
4011 }
4012
4013 /*
4014 * If setscheduling to SCHED_DEADLINE (or changing the parameters
4015 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
4016 * is available.
4017 */
4018 if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
4019 task_rq_unlock(rq, p, &flags);
4020 return -EBUSY;
4021 }
4022
4023 p->sched_reset_on_fork = reset_on_fork;
4024 oldprio = p->prio;
4025
4026 /*
4027 * Take priority boosted tasks into account. If the new
4028 * effective priority is unchanged, we just store the new
4029 * normal parameters and do not touch the scheduler class and
4030 * the runqueue. This will be done when the task deboosts
4031 * itself.
4032 */
4033 new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
4034 if (new_effective_prio == oldprio) {
4035 __setscheduler_params(p, attr);
4036 task_rq_unlock(rq, p, &flags);
4037 return 0;
4038 }
4039
4040 queued = task_on_rq_queued(p);
4041 running = task_current(rq, p);
4042 if (queued)
4043 dequeue_task(rq, p, 0);
4044 if (running)
4045 put_prev_task(rq, p);
4046
4047 prev_class = p->sched_class;
4048 __setscheduler(rq, p, attr, true);
4049
4050 if (running)
4051 p->sched_class->set_curr_task(rq);
4052 if (queued) {
4053 /*
4054 * We enqueue to tail when the priority of a task is
4055 * increased (user space view).
4056 */
4057 enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
4058 }
4059
4060 check_class_changed(rq, p, prev_class, oldprio);
4061 task_rq_unlock(rq, p, &flags);
4062
4063 rt_mutex_adjust_pi(p);
4064
4065 return 0;
4066 }
4067
4068 static int _sched_setscheduler(struct task_struct *p, int policy,
4069 const struct sched_param *param, bool check)
4070 {
4071 struct sched_attr attr = {
4072 .sched_policy = policy,
4073 .sched_priority = param->sched_priority,
4074 .sched_nice = PRIO_TO_NICE(p->static_prio),
4075 };
4076
4077 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
4078 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
4079 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
4080 policy &= ~SCHED_RESET_ON_FORK;
4081 attr.sched_policy = policy;
4082 }
4083
4084 return __sched_setscheduler(p, &attr, check);
4085 }
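
/*
 * Editorial example (not part of the original source): the legacy hack
 * fixed up above lets userspace OR SCHED_RESET_ON_FORK into the policy
 * argument of sched_setscheduler(2), e.g.
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *
 *	sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp);
 *
 * so that children of the task fork back to SCHED_NORMAL.
 */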
4086 /**
4087 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4088 * @p: the task in question.
4089 * @policy: new policy.
4090 * @param: structure containing the new RT priority.
4091 *
4092 * Return: 0 on success. An error code otherwise.
4093 *
4094 * NOTE that the task may already be dead.
4095 */
4096 int sched_setscheduler(struct task_struct *p, int policy,
4097 const struct sched_param *param)
4098 {
4099 return _sched_setscheduler(p, policy, param, true);
4100 }
4101 EXPORT_SYMBOL_GPL(sched_setscheduler);
4102
4103 int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
4104 {
4105 return __sched_setscheduler(p, attr, true);
4106 }
4107 EXPORT_SYMBOL_GPL(sched_setattr);
4108
4109 /**
4110 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4111 * @p: the task in question.
4112 * @policy: new policy.
4113 * @param: structure containing the new RT priority.
4114 *
4115 * Just like sched_setscheduler, only don't bother checking if the
4116 * current context has permission. For example, this is needed in
4117 * stop_machine(): we create temporary high priority worker threads,
4118 * but our caller might not have that capability.
4119 *
4120 * Return: 0 on success. An error code otherwise.
4121 */
4122 int sched_setscheduler_nocheck(struct task_struct *p, int policy,
4123 const struct sched_param *param)
4124 {
4125 return _sched_setscheduler(p, policy, param, false);
4126 }
4127 EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
4128
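/*
 * Editorial sketch (not part of the original source) of typical
 * in-kernel usage: promoting a freshly created kthread (p being its
 * task_struct) to the highest FIFO priority, as stop_machine() does,
 * with no capability check against the possibly unprivileged current
 * context:
 *
 *	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 *
 *	sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
 */
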
4129 static int
4130 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4131 {
4132 struct sched_param lparam;
4133 struct task_struct *p;
4134 int retval;
4135
4136 if (!param || pid < 0)
4137 return -EINVAL;
4138 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4139 return -EFAULT;
4140
4141 rcu_read_lock();
4142 retval = -ESRCH;
4143 p = find_process_by_pid(pid);
4144 if (p != NULL)
4145 retval = sched_setscheduler(p, policy, &lparam);
4146 rcu_read_unlock();
4147
4148 return retval;
4149 }
4150
4151 /*
4152 * Mimics kernel/events/core.c perf_copy_attr().
4153 */
4154 static int sched_copy_attr(struct sched_attr __user *uattr,
4155 struct sched_attr *attr)
4156 {
4157 u32 size;
4158 int ret;
4159
4160 if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
4161 return -EFAULT;
4162
4163 /*
4164 * Zero the full structure, so that a short copy leaves the tail zeroed.
4165 */
4166 memset(attr, 0, sizeof(*attr));
4167
4168 ret = get_user(size, &uattr->size);
4169 if (ret)
4170 return ret;
4171
4172 if (size > PAGE_SIZE) /* silly large */
4173 goto err_size;
4174
4175 if (!size) /* abi compat */
4176 size = SCHED_ATTR_SIZE_VER0;
4177
4178 if (size < SCHED_ATTR_SIZE_VER0)
4179 goto err_size;
4180
4181 /*
4182 * If we're handed a bigger struct than we know of,
4183 * ensure all the unknown bits are 0 - i.e. new
4184 * user-space does not rely on any kernel feature
4185 * extensions we don't know about yet.
4186 */
4187 if (size > sizeof(*attr)) {
4188 unsigned char __user *addr;
4189 unsigned char __user *end;
4190 unsigned char val;
4191
4192 addr = (void __user *)uattr + sizeof(*attr);
4193 end = (void __user *)uattr + size;
4194
4195 for (; addr < end; addr++) {
4196 ret = get_user(val, addr);
4197 if (ret)
4198 return ret;
4199 if (val)
4200 goto err_size;
4201 }
4202 size = sizeof(*attr);
4203 }
4204
4205 ret = copy_from_user(attr, uattr, size);
4206 if (ret)
4207 return -EFAULT;
4208
4209 /*
4210 * XXX: do we want to be lenient like existing syscalls; or do we want
4211 * to be strict and return an error on out-of-bounds values?
4212 */
4213 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
4214
4215 return 0;
4216
4217 err_size:
4218 put_user(sizeof(*attr), &uattr->size);
4219 return -E2BIG;
4220 }
4221
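/*
 * Editorial example (not part of the original source): the size
 * handling above provides forward ABI compatibility. A newer
 * userspace passing a larger sched_attr is accepted as long as every
 * byte beyond the kernel's sizeof(struct sched_attr) is zero;
 * otherwise the call fails with E2BIG and the kernel writes the size
 * it does understand back into uattr->size, so a hypothetical caller
 * can shrink the structure and retry:
 *
 *	ret = syscall(SYS_sched_setattr, 0, &attr, 0);
 *	if (ret && errno == E2BIG)
 *		attr.size now holds the size the kernel accepts
 */
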
4222 /**
4223 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4224 * @pid: the pid in question.
4225 * @policy: new policy.
4226 * @param: structure containing the new RT priority.
4227 *
4228 * Return: 0 on success. An error code otherwise.
4229 */
4230 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4231 struct sched_param __user *, param)
4232 {
4233 /* negative values for policy are not valid */
4234 if (policy < 0)
4235 return -EINVAL;
4236
4237 return do_sched_setscheduler(pid, policy, param);
4238 }
4239
4240 /**
4241 * sys_sched_setparam - set/change the RT priority of a thread
4242 * @pid: the pid in question.
4243 * @param: structure containing the new RT priority.
4244 *
4245 * Return: 0 on success. An error code otherwise.
4246 */
4247 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
4248 {
4249 return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
4250 }
4251
4252 /**
4253 * sys_sched_setattr - same as above, but with extended sched_attr
4254 * @pid: the pid in question.
4255 * @uattr: structure containing the extended parameters.
4256 * @flags: for future extension.
4257 */
4258 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
4259 unsigned int, flags)
4260 {
4261 struct sched_attr attr;
4262 struct task_struct *p;
4263 int retval;
4264
4265 if (!uattr || pid < 0 || flags)
4266 return -EINVAL;
4267
4268 retval = sched_copy_attr(uattr, &attr);
4269 if (retval)
4270 return retval;
4271
4272 if ((int)attr.sched_policy < 0)
4273 return -EINVAL;
4274
4275 rcu_read_lock();
4276 retval = -ESRCH;
4277 p = find_process_by_pid(pid);
4278 if (p != NULL)
4279 retval = sched_setattr(p, &attr);
4280 rcu_read_unlock();
4281
4282 return retval;
4283 }
4284
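/*
 * Editorial example (not part of the original source): glibc has
 * historically provided no wrapper for this syscall, so callers
 * invoke it directly. A hypothetical task renicing itself to nice 5
 * through the extended interface:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_NORMAL,
 *		.sched_nice	= 5,
 *	};
 *
 *	syscall(SYS_sched_setattr, 0, &attr, 0);
 */
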
4285 /**
4286 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4287 * @pid: the pid in question.
4288 *
4289 * Return: On success, the policy of the thread. Otherwise, a negative error
4290 * code.
4291 */
4292 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
4293 {
4294 struct task_struct *p;
4295 int retval;
4296
4297 if (pid < 0)
4298 return -EINVAL;
4299
4300 retval = -ESRCH;
4301 rcu_read_lock();
4302 p = find_process_by_pid(pid);
4303 if (p) {
4304 retval = security_task_getscheduler(p);
4305 if (!retval)
4306 retval = p->policy
4307 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
4308 }
4309 rcu_read_unlock();
4310 return retval;
4311 }
4312
4313 /**
4314 * sys_sched_getparam - get the RT priority of a thread
4315 * @pid: the pid in question.
4316 * @param: structure containing the RT priority.
4317 *
4318 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
4319 * code.
4320 */
4321 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
4322 {
4323 struct sched_param lp = { .sched_priority = 0 };
4324 struct task_struct *p;
4325 int retval;
4326
4327 if (!param || pid < 0)
4328 return -EINVAL;
4329
4330 rcu_read_lock();
4331 p = find_process_by_pid(pid);
4332 retval = -ESRCH;
4333 if (!p)
4334 goto out_unlock;
4335
4336 retval = security_task_getscheduler(p);
4337 if (retval)
4338 goto out_unlock;
4339
4340 if (task_has_rt_policy(p))
4341 lp.sched_priority = p->rt_priority;
4342 rcu_read_unlock();
4343
4344 /*
4345 * This one might sleep, we cannot do it with a spinlock held ...
4346 */
4347 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4348
4349 return retval;
4350
4351 out_unlock:
4352 rcu_read_unlock();
4353 return retval;
4354 }
4355
4356 static int sched_read_attr(struct sched_attr __user *uattr,
4357 struct sched_attr *attr,
4358 unsigned int usize)
4359 {
4360 int ret;
4361
4362 if (!access_ok(VERIFY_WRITE, uattr, usize))
4363 return -EFAULT;
4364
4365 /*
4366 * If we're handed a smaller struct than we know of,
4367 * ensure all the unknown bits are 0 - i.e. old
4368 * user-space does not get incomplete information.
4369 */
4370 if (usize < sizeof(*attr)) {
4371 unsigned char *addr;
4372 unsigned char *end;
4373
4374 addr = (void *)attr + usize;
4375 end = (void *)attr + sizeof(*attr);
4376
4377 for (; addr < end; addr++) {
4378 if (*addr)
4379 return -EFBIG;
4380 }
4381
4382 attr->size = usize;
4383 }
4384
4385 ret = copy_to_user(uattr, attr, attr->size);
4386 if (ret)
4387 return -EFAULT;
4388
4389 return 0;
4390 }
4391
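/*
 * Editorial note (not part of the original source): this is the
 * backward-compatibility counterpart of sched_copy_attr(). An old
 * binary asking for only SCHED_ATTR_SIZE_VER0 bytes still succeeds,
 * with attr->size shrunk to what it asked for, provided none of the
 * newer fields are in use; if one is non-zero the caller gets -EFBIG
 * rather than silently truncated information.
 */
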
4392 /**
4393 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
4394 * @pid: the pid in question.
4395 * @uattr: structure containing the extended parameters.
4396 * @size: sizeof(attr) for fwd/bwd comp.
4397 * @flags: for future extension.
4398 */
4399 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
4400 unsigned int, size, unsigned int, flags)
4401 {
4402 struct sched_attr attr = {
4403 .size = sizeof(struct sched_attr),
4404 };
4405 struct task_struct *p;
4406 int retval;
4407
4408 if (!uattr || pid < 0 || size > PAGE_SIZE ||
4409 size < SCHED_ATTR_SIZE_VER0 || flags)
4410 return -EINVAL;
4411
4412 rcu_read_lock();
4413 p = find_process_by_pid(pid);
4414 retval = -ESRCH;
4415 if (!p)
4416 goto out_unlock;
4417
4418 retval = security_task_getscheduler(p);
4419 if (retval)
4420 goto out_unlock;
4421
4422 attr.sched_policy = p->policy;
4423 if (p->sched_reset_on_fork)
4424 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
4425 if (task_has_dl_policy(p))
4426 __getparam_dl(p, &attr);
4427 else if (task_has_rt_policy(p))
4428 attr.sched_priority = p->rt_priority;
4429 else
4430 attr.sched_nice = task_nice(p);
4431
4432 rcu_read_unlock();
4433
4434 retval = sched_read_attr(uattr, &attr, size);
4435 return retval;
4436
4437 out_unlock:
4438 rcu_read_unlock();
4439 return retval;
4440 }
4441
4442 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
4443 {
4444 cpumask_var_t cpus_allowed, new_mask;
4445 struct task_struct *p;
4446 int retval;
4447
4448 rcu_read_lock();
4449
4450 p = find_process_by_pid(pid);
4451 if (!p) {
4452 rcu_read_unlock();
4453 return -ESRCH;
4454 }
4455
4456 /* Prevent p going away */
4457 get_task_struct(p);
4458 rcu_read_unlock();
4459
4460 if (p->flags & PF_NO_SETAFFINITY) {
4461 retval = -EINVAL;
4462 goto out_put_task;
4463 }
4464 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4465 retval = -ENOMEM;
4466 goto out_put_task;
4467 }
4468 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4469 retval = -ENOMEM;
4470 goto out_free_cpus_allowed;
4471 }
4472 retval = -EPERM;
4473 if (!check_same_owner(p)) {
4474 rcu_read_lock();
4475 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
4476 rcu_read_unlock();
4477 goto out_free_new_mask;
4478 }
4479 rcu_read_unlock();
4480 }
4481
4482 retval = security_task_setscheduler(p);
4483 if (retval)
4484 goto out_free_new_mask;
4485
4486
4487 cpuset_cpus_allowed(p, cpus_allowed);
4488 cpumask_and(new_mask, in_mask, cpus_allowed);
4489
4490 /*
4491 * Since bandwidth control happens on root_domain basis,
4492 * if admission test is enabled, we only admit -deadline
4493 * tasks allowed to run on all the CPUs in the task's
4494 * root_domain.
4495 */
4496 #ifdef CONFIG_SMP
4497 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
4498 rcu_read_lock();
4499 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
4500 retval = -EBUSY;
4501 rcu_read_unlock();
4502 goto out_free_new_mask;
4503 }
4504 rcu_read_unlock();
4505 }
4506 #endif
4507 again:
4508 retval = __set_cpus_allowed_ptr(p, new_mask, true);
4509
4510 if (!retval) {
4511 cpuset_cpus_allowed(p, cpus_allowed);
4512 if (!cpumask_subset(new_mask, cpus_allowed)) {
4513 /*
4514 * We must have raced with a concurrent cpuset
4515 * update. Just reset the cpus_allowed to the
4516 * cpuset's cpus_allowed
4517 */
4518 cpumask_copy(new_mask, cpus_allowed);
4519 goto again;
4520 }
4521 }
4522 out_free_new_mask:
4523 free_cpumask_var(new_mask);
4524 out_free_cpus_allowed:
4525 free_cpumask_var(cpus_allowed);
4526 out_put_task:
4527 put_task_struct(p);
4528 return retval;
4529 }
4530
4531 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
4532 struct cpumask *new_mask)
4533 {
4534 if (len < cpumask_size())
4535 cpumask_clear(new_mask);
4536 else if (len > cpumask_size())
4537 len = cpumask_size();
4538
4539 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4540 }
4541
4542 /**
4543 * sys_sched_setaffinity - set the cpu affinity of a process
4544 * @pid: pid of the process
4545 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4546 * @user_mask_ptr: user-space pointer to the new cpu mask
4547 *
4548 * Return: 0 on success. An error code otherwise.
4549 */
4550 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4551 unsigned long __user *, user_mask_ptr)
4552 {
4553 cpumask_var_t new_mask;
4554 int retval;
4555
4556 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4557 return -ENOMEM;
4558
4559 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4560 if (retval == 0)
4561 retval = sched_setaffinity(pid, new_mask);
4562 free_cpumask_var(new_mask);
4563 return retval;
4564 }
4565
4566 long sched_getaffinity(pid_t pid, struct cpumask *mask)
4567 {
4568 struct task_struct *p;
4569 unsigned long flags;
4570 int retval;
4571
4572 rcu_read_lock();
4573
4574 retval = -ESRCH;
4575 p = find_process_by_pid(pid);
4576 if (!p)
4577 goto out_unlock;
4578
4579 retval = security_task_getscheduler(p);
4580 if (retval)
4581 goto out_unlock;
4582
4583 raw_spin_lock_irqsave(&p->pi_lock, flags);
4584 cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
4585 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4586
4587 out_unlock:
4588 rcu_read_unlock();
4589
4590 return retval;
4591 }
4592
4593 /**
4594 * sys_sched_getaffinity - get the cpu affinity of a process
4595 * @pid: pid of the process
4596 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4597 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4598 *
4599 * Return: 0 on success. An error code otherwise.
4600 */
4601 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4602 unsigned long __user *, user_mask_ptr)
4603 {
4604 int ret;
4605 cpumask_var_t mask;
4606
4607 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4608 return -EINVAL;
4609 if (len & (sizeof(unsigned long)-1))
4610 return -EINVAL;
4611
4612 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4613 return -ENOMEM;
4614
4615 ret = sched_getaffinity(pid, mask);
4616 if (ret == 0) {
4617 size_t retlen = min_t(size_t, len, cpumask_size());
4618
4619 if (copy_to_user(user_mask_ptr, mask, retlen))
4620 ret = -EFAULT;
4621 else
4622 ret = retlen;
4623 }
4624 free_cpumask_var(mask);
4625
4626 return ret;
4627 }
4628
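/*
 * Editorial note (not part of the original source): unlike most
 * syscalls, raw sched_getaffinity(2) returns the number of mask
 * bytes copied rather than 0 on success; the glibc wrapper hides
 * this. A hypothetical direct user:
 *
 *	unsigned long mask[128];
 *	long n = syscall(SYS_sched_getaffinity, 0, sizeof(mask), mask);
 *
 * where n is min(len, cpumask_size()) on success, negative on error.
 */
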
4629 /**
4630 * sys_sched_yield - yield the current processor to other threads.
4631 *
4632 * This function yields the current CPU to other tasks. If there are no
4633 * other threads running on this CPU then this function will return.
4634 *
4635 * Return: 0.
4636 */
4637 SYSCALL_DEFINE0(sched_yield)
4638 {
4639 struct rq *rq = this_rq_lock();
4640
4641 schedstat_inc(rq, yld_count);
4642 current->sched_class->yield_task(rq);
4643
4644 /*
4645 * Since we are going to call schedule() anyway, there's
4646 * no need to preempt or enable interrupts:
4647 */
4648 __release(rq->lock);
4649 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
4650 do_raw_spin_unlock(&rq->lock);
4651 sched_preempt_enable_no_resched();
4652
4653 schedule();
4654
4655 return 0;
4656 }
4657
4658 static void __cond_resched(void)
4659 {
4660 __preempt_count_add(PREEMPT_ACTIVE);
4661 __schedule();
4662 __preempt_count_sub(PREEMPT_ACTIVE);
4663 }
4664
4665 int __sched _cond_resched(void)
4666 {
4667 if (should_resched()) {
4668 __cond_resched();
4669 return 1;
4670 }
4671 return 0;
4672 }
4673 EXPORT_SYMBOL(_cond_resched);
4674
4675 /*
4676 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
4677 * call schedule, and on return reacquire the lock.
4678 *
4679 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
4680 * operations here to prevent schedule() from being called twice (once via
4681 * spin_unlock(), once by hand).
4682 */
4683 int __cond_resched_lock(spinlock_t *lock)
4684 {
4685 int resched = should_resched();
4686 int ret = 0;
4687
4688 lockdep_assert_held(lock);
4689
4690 if (spin_needbreak(lock) || resched) {
4691 spin_unlock(lock);
4692 if (resched)
4693 __cond_resched();
4694 else
4695 cpu_relax();
4696 ret = 1;
4697 spin_lock(lock);
4698 }
4699 return ret;
4700 }
4701 EXPORT_SYMBOL(__cond_resched_lock);
4702
4703 int __sched __cond_resched_softirq(void)
4704 {
4705 BUG_ON(!in_softirq());
4706
4707 if (should_resched()) {
4708 local_bh_enable();
4709 __cond_resched();
4710 local_bh_disable();
4711 return 1;
4712 }
4713 return 0;
4714 }
4715 EXPORT_SYMBOL(__cond_resched_softirq);
4716
4717 /**
4718 * yield - yield the current processor to other threads.
4719 *
4720 * Do not ever use this function, there's a 99% chance you're doing it wrong.
4721 *
4722 * The scheduler is at all times free to pick the calling task as the most
4723 * eligible task to run, if removing the yield() call from your code breaks
4724 * it, it's already broken.
4725 *
4726 * Typical broken usage is:
4727 *
4728 * while (!event)
4729 * yield();
4730 *
4731 * where one assumes that yield() will let 'the other' process run that will
4732 * make event true. If the current task is a SCHED_FIFO task that will never
4733 * happen. Never use yield() as a progress guarantee!!
4734 *
4735 * If you want to use yield() to wait for something, use wait_event().
4736 * If you want to use yield() to be 'nice' for others, use cond_resched().
4737 * If you still want to use yield(), do not!
4738 */
4739 void __sched yield(void)
4740 {
4741 set_current_state(TASK_RUNNING);
4742 sys_sched_yield();
4743 }
4744 EXPORT_SYMBOL(yield);
4745
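/*
 * Editorial example (not part of the original source): the
 * wait_event() pattern the comment above recommends in place of a
 * yield() loop, assuming a wait_queue_head_t 'wq' and a waker doing
 * 'event = true; wake_up(&wq);':
 *
 *	wait_event(wq, event);
 */
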
4746 /**
4747 * yield_to - yield the current processor to another thread in
4748 * your thread group, or accelerate that thread toward the
4749 * processor it's on.
4750 * @p: target task
4751 * @preempt: whether task preemption is allowed or not
4752 *
4753 * It's the caller's job to ensure that the target task struct
4754 * can't go away on us before we can do any checks.
4755 *
4756 * Return:
4757 * true (>0) if we indeed boosted the target task.
4758 * false (0) if we failed to boost the target.
4759 * -ESRCH if there's no task to yield to.
4760 */
4761 int __sched yield_to(struct task_struct *p, bool preempt)
4762 {
4763 struct task_struct *curr = current;
4764 struct rq *rq, *p_rq;
4765 unsigned long flags;
4766 int yielded = 0;
4767
4768 local_irq_save(flags);
4769 rq = this_rq();
4770
4771 again:
4772 p_rq = task_rq(p);
4773 /*
4774 * If we're the only runnable task on the rq and target rq also
4775 * has only one task, there's absolutely no point in yielding.
4776 */
4777 if (rq->nr_running == 1 && p_rq->nr_running == 1) {
4778 yielded = -ESRCH;
4779 goto out_irq;
4780 }
4781
4782 double_rq_lock(rq, p_rq);
4783 if (task_rq(p) != p_rq) {
4784 double_rq_unlock(rq, p_rq);
4785 goto again;
4786 }
4787
4788 if (!curr->sched_class->yield_to_task)
4789 goto out_unlock;
4790
4791 if (curr->sched_class != p->sched_class)
4792 goto out_unlock;
4793
4794 if (task_running(p_rq, p) || p->state)
4795 goto out_unlock;
4796
4797 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
4798 if (yielded) {
4799 schedstat_inc(rq, yld_count);
4800 /*
4801 * Make p's CPU reschedule; pick_next_entity takes care of
4802 * fairness.
4803 */
4804 if (preempt && rq != p_rq)
4805 resched_curr(p_rq);
4806 }
4807
4808 out_unlock:
4809 double_rq_unlock(rq, p_rq);
4810 out_irq:
4811 local_irq_restore(flags);
4812
4813 if (yielded > 0)
4814 schedule();
4815
4816 return yielded;
4817 }
4818 EXPORT_SYMBOL_GPL(yield_to);
4819
4820 /*
4821 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
4822 * that process accounting knows that this is a task in IO wait state.
4823 */
4824 long __sched io_schedule_timeout(long timeout)
4825 {
4826 int old_iowait = current->in_iowait;
4827 struct rq *rq;
4828 long ret;
4829
4830 current->in_iowait = 1;
4831 blk_schedule_flush_plug(current);
4832
4833 delayacct_blkio_start();
4834 rq = raw_rq();
4835 atomic_inc(&rq->nr_iowait);
4836 ret = schedule_timeout(timeout);
4837 current->in_iowait = old_iowait;
4838 atomic_dec(&rq->nr_iowait);
4839 delayacct_blkio_end();
4840
4841 return ret;
4842 }
4843 EXPORT_SYMBOL(io_schedule_timeout);
4844
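/*
 * Editorial sketch (not part of the original source): as with
 * schedule_timeout(), the caller sets the task state first; e.g. a
 * hypothetical driver waiting up to one second for its I/O:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	remaining = io_schedule_timeout(HZ);
 */
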
4845 /**
4846 * sys_sched_get_priority_max - return maximum RT priority.
4847 * @policy: scheduling class.
4848 *
4849 * Return: On success, this syscall returns the maximum
4850 * rt_priority that can be used by a given scheduling class.
4851 * On failure, a negative error code is returned.
4852 */
4853 SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
4854 {
4855 int ret = -EINVAL;
4856
4857 switch (policy) {
4858 case SCHED_FIFO:
4859 case SCHED_RR:
4860 ret = MAX_USER_RT_PRIO-1;
4861 break;
4862 case SCHED_DEADLINE:
4863 case SCHED_NORMAL:
4864 case SCHED_BATCH:
4865 case SCHED_IDLE:
4866 ret = 0;
4867 break;
4868 }
4869 return ret;
4870 }
4871
4872 /**
4873 * sys_sched_get_priority_min - return minimum RT priority.
4874 * @policy: scheduling class.
4875 *
4876 * Return: On success, this syscall returns the minimum
4877 * rt_priority that can be used by a given scheduling class.
4878 * On failure, a negative error code is returned.
4879 */
4880 SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
4881 {
4882 int ret = -EINVAL;
4883
4884 switch (policy) {
4885 case SCHED_FIFO:
4886 case SCHED_RR:
4887 ret = 1;
4888 break;
4889 case SCHED_DEADLINE:
4890 case SCHED_NORMAL:
4891 case SCHED_BATCH:
4892 case SCHED_IDLE:
4893 ret = 0;
4894 }
4895 return ret;
4896 }
4897
4898 /**
4899 * sys_sched_rr_get_interval - return the default timeslice of a process.
4900 * @pid: pid of the process.
4901 * @interval: userspace pointer to the timeslice value.
4902 *
4903 * this syscall writes the default timeslice value of a given process
4904 * into the user-space timespec buffer. A value of '0' means infinity.
4905 *
4906 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
4907 * an error code.
4908 */
4909 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
4910 struct timespec __user *, interval)
4911 {
4912 struct task_struct *p;
4913 unsigned int time_slice;
4914 unsigned long flags;
4915 struct rq *rq;
4916 int retval;
4917 struct timespec t;
4918
4919 if (pid < 0)
4920 return -EINVAL;
4921
4922 retval = -ESRCH;
4923 rcu_read_lock();
4924 p = find_process_by_pid(pid);
4925 if (!p)
4926 goto out_unlock;
4927
4928 retval = security_task_getscheduler(p);
4929 if (retval)
4930 goto out_unlock;
4931
4932 rq = task_rq_lock(p, &flags);
4933 time_slice = 0;
4934 if (p->sched_class->get_rr_interval)
4935 time_slice = p->sched_class->get_rr_interval(rq, p);
4936 task_rq_unlock(rq, p, &flags);
4937
4938 rcu_read_unlock();
4939 jiffies_to_timespec(time_slice, &t);
4940 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
4941 return retval;
4942
4943 out_unlock:
4944 rcu_read_unlock();
4945 return retval;
4946 }
4947
4948 static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
4949
4950 void sched_show_task(struct task_struct *p)
4951 {
4952 unsigned long free = 0;
4953 int ppid;
4954 unsigned state;
4955
4956 state = p->state ? __ffs(p->state) + 1 : 0;
4957 printk(KERN_INFO "%-15.15s %c", p->comm,
4958 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4959 #if BITS_PER_LONG == 32
4960 if (state == TASK_RUNNING)
4961 printk(KERN_CONT " running ");
4962 else
4963 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
4964 #else
4965 if (state == TASK_RUNNING)
4966 printk(KERN_CONT " running task ");
4967 else
4968 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
4969 #endif
4970 #ifdef CONFIG_DEBUG_STACK_USAGE
4971 free = stack_not_used(p);
4972 #endif
4973 rcu_read_lock();
4974 ppid = task_pid_nr(rcu_dereference(p->real_parent));
4975 rcu_read_unlock();
4976 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
4977 task_pid_nr(p), ppid,
4978 (unsigned long)task_thread_info(p)->flags);
4979
4980 print_worker_info(KERN_INFO, p);
4981 show_stack(p, NULL);
4982 }
4983
4984 void show_state_filter(unsigned long state_filter)
4985 {
4986 struct task_struct *g, *p;
4987
4988 #if BITS_PER_LONG == 32
4989 printk(KERN_INFO
4990 " task PC stack pid father\n");
4991 #else
4992 printk(KERN_INFO
4993 " task PC stack pid father\n");
4994 #endif
4995 rcu_read_lock();
4996 for_each_process_thread(g, p) {
4997 /*
4998 * reset the NMI-timeout, listing all files on a slow
4999 * console might take a lot of time:
5000 * Also, reset softlockup watchdogs on all CPUs, because
5001 * another CPU might be blocked waiting for us to process
5002 * an IPI.
5003 */
5004 touch_nmi_watchdog();
5005 touch_all_softlockup_watchdogs();
5006 if (!state_filter || (p->state & state_filter))
5007 sched_show_task(p);
5008 }
5009
5010 #ifdef CONFIG_SCHED_DEBUG
5011 sysrq_sched_debug_show();
5012 #endif
5013 rcu_read_unlock();
5014 /*
5015 * Only show locks if all tasks are dumped:
5016 */
5017 if (!state_filter)
5018 debug_show_all_locks();
5019 }
5020
5021 void init_idle_bootup_task(struct task_struct *idle)
5022 {
5023 idle->sched_class = &idle_sched_class;
5024 }
5025
5026 /**
5027 * init_idle - set up an idle thread for a given CPU
5028 * @idle: task in question
5029 * @cpu: cpu the idle task belongs to
5030 *
5031 * NOTE: this function does not set the idle thread's NEED_RESCHED
5032 * flag, to make booting more robust.
5033 */
5034 void init_idle(struct task_struct *idle, int cpu)
5035 {
5036 struct rq *rq = cpu_rq(cpu);
5037 unsigned long flags;
5038
5039 raw_spin_lock_irqsave(&idle->pi_lock, flags);
5040 raw_spin_lock(&rq->lock);
5041
5042 __sched_fork(0, idle);
5043
5044 idle->state = TASK_RUNNING;
5045 idle->se.exec_start = sched_clock();
5046
5047 do_set_cpus_allowed(idle, cpumask_of(cpu));
5048 /*
5049 * We're having a chicken and egg problem, even though we are
5050 * holding rq->lock, the cpu isn't yet set to this cpu so the
5051 * lockdep check in task_group() will fail.
5052 *
5053 * Similar case to sched_fork(). / Alternatively we could
5054 * use task_rq_lock() here and obtain the other rq->lock.
5055 *
5056 * Silence PROVE_RCU
5057 */
5058 rcu_read_lock();
5059 __set_task_cpu(idle, cpu);
5060 rcu_read_unlock();
5061
5062 rq->curr = rq->idle = idle;
5063 idle->on_rq = TASK_ON_RQ_QUEUED;
5064 #if defined(CONFIG_SMP)
5065 idle->on_cpu = 1;
5066 #endif
5067 raw_spin_unlock(&rq->lock);
5068 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
5069
5070 /* Set the preempt count _outside_ the spinlocks! */
5071 init_idle_preempt_count(idle, cpu);
5072
5073 /*
5074 * The idle tasks have their own, simple scheduling class:
5075 */
5076 idle->sched_class = &idle_sched_class;
5077 ftrace_graph_init_idle_task(idle, cpu);
5078 vtime_init_idle(idle, cpu);
5079 #if defined(CONFIG_SMP)
5080 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
5081 #endif
5082 }
5083
5084 #ifdef CONFIG_SMP
5085
5086 #ifdef CONFIG_NUMA_BALANCING
5087 /* Migrate current task p to target_cpu */
5088 int migrate_task_to(struct task_struct *p, int target_cpu)
5089 {
5090 struct migration_arg arg = { p, target_cpu };
5091 int curr_cpu = task_cpu(p);
5092
5093 if (curr_cpu == target_cpu)
5094 return 0;
5095
5096 if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
5097 return -EINVAL;
5098
5099 /* TODO: This is not properly updating schedstats */
5100
5101 trace_sched_move_numa(p, curr_cpu, target_cpu);
5102 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
5103 }
5104
5105 /*
5106 * Requeue a task on a given node and accurately track the number of NUMA
5107 * tasks on the runqueues
5108 */
5109 void sched_setnuma(struct task_struct *p, int nid)
5110 {
5111 struct rq *rq;
5112 unsigned long flags;
5113 bool queued, running;
5114
5115 rq = task_rq_lock(p, &flags);
5116 queued = task_on_rq_queued(p);
5117 running = task_current(rq, p);
5118
5119 if (queued)
5120 dequeue_task(rq, p, 0);
5121 if (running)
5122 put_prev_task(rq, p);
5123
5124 p->numa_preferred_nid = nid;
5125
5126 if (running)
5127 p->sched_class->set_curr_task(rq);
5128 if (queued)
5129 enqueue_task(rq, p, 0);
5130 task_rq_unlock(rq, p, &flags);
5131 }
5132 #endif /* CONFIG_NUMA_BALANCING */
5133
5134 #ifdef CONFIG_HOTPLUG_CPU
5135 /*
5136 * Ensures that the idle task is using init_mm right before its cpu goes
5137 * offline.
5138 */
5139 void idle_task_exit(void)
5140 {
5141 struct mm_struct *mm = current->active_mm;
5142
5143 BUG_ON(cpu_online(smp_processor_id()));
5144
5145 if (mm != &init_mm) {
5146 switch_mm(mm, &init_mm, current);
5147 finish_arch_post_lock_switch();
5148 }
5149 mmdrop(mm);
5150 }
5151
5152 /*
5153 * Since this CPU is going 'away' for a while, fold any nr_active delta
5154 * we might have. Assumes we're called after migrate_tasks() so that the
5155 * nr_active count is stable.
5156 *
5157 * Also see the comment "Global load-average calculations".
5158 */
5159 static void calc_load_migrate(struct rq *rq)
5160 {
5161 long delta = calc_load_fold_active(rq);
5162 if (delta)
5163 atomic_long_add(delta, &calc_load_tasks);
5164 }
5165
5166 static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
5167 {
5168 }
5169
5170 static const struct sched_class fake_sched_class = {
5171 .put_prev_task = put_prev_task_fake,
5172 };
5173
5174 static struct task_struct fake_task = {
5175 /*
5176 * Avoid pull_{rt,dl}_task()
5177 */
5178 .prio = MAX_PRIO + 1,
5179 .sched_class = &fake_sched_class,
5180 };
5181
5182 /*
5183 * Migrate all tasks from the rq, sleeping tasks will be migrated by
5184 * try_to_wake_up()->select_task_rq().
5185 *
5186 * Called with rq->lock held even though we're in stop_machine() and
5187 * there's no concurrency possible, we hold the required locks anyway
5188 * because of lock validation efforts.
5189 */
5190 static void migrate_tasks(unsigned int dead_cpu)
5191 {
5192 struct rq *rq = cpu_rq(dead_cpu);
5193 struct task_struct *next, *stop = rq->stop;
5194 int dest_cpu;
5195
5196 /*
5197 * Fudge the rq selection such that the below task selection loop
5198 * doesn't get stuck on the currently eligible stop task.
5199 *
5200 * We're currently inside stop_machine() and the rq is either stuck
5201 * in the stop_machine_cpu_stop() loop, or we're executing this code,
5202 * either way we should never end up calling schedule() until we're
5203 * done here.
5204 */
5205 rq->stop = NULL;
5206
5207 /*
5208 * put_prev_task() and pick_next_task() sched
5209 * class methods both need to have an up-to-date
5210 * value of rq->clock[_task]
5211 */
5212 update_rq_clock(rq);
5213
5214 for ( ; ; ) {
5215 /*
5216 * There's this thread running, bail when that's the only
5217 * remaining thread.
5218 */
5219 if (rq->nr_running == 1)
5220 break;
5221
5222 next = pick_next_task(rq, &fake_task);
5223 BUG_ON(!next);
5224 next->sched_class->put_prev_task(rq, next);
5225
5226 /* Find suitable destination for @next, with force if needed. */
5227 dest_cpu = select_fallback_rq(dead_cpu, next);
5228 raw_spin_unlock(&rq->lock);
5229
5230 __migrate_task(next, dead_cpu, dest_cpu);
5231
5232 raw_spin_lock(&rq->lock);
5233 }
5234
5235 rq->stop = stop;
5236 }
5237 #endif /* CONFIG_HOTPLUG_CPU */
5238
5239 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
5240
5241 static struct ctl_table sd_ctl_dir[] = {
5242 {
5243 .procname = "sched_domain",
5244 .mode = 0555,
5245 },
5246 {}
5247 };
5248
5249 static struct ctl_table sd_ctl_root[] = {
5250 {
5251 .procname = "kernel",
5252 .mode = 0555,
5253 .child = sd_ctl_dir,
5254 },
5255 {}
5256 };
5257
5258 static struct ctl_table *sd_alloc_ctl_entry(int n)
5259 {
5260 struct ctl_table *entry =
5261 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
5262
5263 return entry;
5264 }
5265
5266 static void sd_free_ctl_entry(struct ctl_table **tablep)
5267 {
5268 struct ctl_table *entry;
5269
5270 /*
5271 * In the intermediate directories, both the child directory and
5272 * procname are dynamically allocated and could fail but the mode
5273 * will always be set. In the lowest directory the names are
5274 * static strings and all have proc handlers.
5275 */
5276 for (entry = *tablep; entry->mode; entry++) {
5277 if (entry->child)
5278 sd_free_ctl_entry(&entry->child);
5279 if (entry->proc_handler == NULL)
5280 kfree(entry->procname);
5281 }
5282
5283 kfree(*tablep);
5284 *tablep = NULL;
5285 }
5286
5287 static int min_load_idx = 0;
5288 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
5289
5290 static void
5291 set_table_entry(struct ctl_table *entry,
5292 const char *procname, void *data, int maxlen,
5293 umode_t mode, proc_handler *proc_handler,
5294 bool load_idx)
5295 {
5296 entry->procname = procname;
5297 entry->data = data;
5298 entry->maxlen = maxlen;
5299 entry->mode = mode;
5300 entry->proc_handler = proc_handler;
5301
5302 if (load_idx) {
5303 entry->extra1 = &min_load_idx;
5304 entry->extra2 = &max_load_idx;
5305 }
5306 }
5307
5308 static struct ctl_table *
5309 sd_alloc_ctl_energy_table(struct sched_group_energy *sge)
5310 {
5311 struct ctl_table *table = sd_alloc_ctl_entry(5);
5312
5313 if (table == NULL)
5314 return NULL;
5315
5316 set_table_entry(&table[0], "nr_idle_states", &sge->nr_idle_states,
5317 sizeof(int), 0644, proc_dointvec_minmax, false);
5318 set_table_entry(&table[1], "idle_states", &sge->idle_states[0].power,
5319 sge->nr_idle_states*sizeof(struct idle_state), 0644,
5320 proc_doulongvec_minmax, false);
5321 set_table_entry(&table[2], "nr_cap_states", &sge->nr_cap_states,
5322 sizeof(int), 0644, proc_dointvec_minmax, false);
5323 set_table_entry(&table[3], "cap_states", &sge->cap_states[0].cap,
5324 sge->nr_cap_states*sizeof(struct capacity_state), 0644,
5325 proc_doulongvec_minmax, false);
5326
5327 return table;
5328 }
5329
5330 static struct ctl_table *
5331 sd_alloc_ctl_group_table(struct sched_group *sg)
5332 {
5333 struct ctl_table *table = sd_alloc_ctl_entry(2);
5334
5335 if (table == NULL)
5336 return NULL;
5337
5338 table->procname = kstrdup("energy", GFP_KERNEL);
5339 table->mode = 0555;
5340 table->child = sd_alloc_ctl_energy_table((struct sched_group_energy *)sg->sge);
5341
5342 return table;
5343 }
5344
5345 static struct ctl_table *
5346 sd_alloc_ctl_domain_table(struct sched_domain *sd)
5347 {
5348 struct ctl_table *table;
5349 unsigned int nr_entries = 14;
5350
5351 int i = 0;
5352 struct sched_group *sg = sd->groups;
5353
5354 if (sg->sge) {
5355 int nr_sgs = 0;
5356
5357 do {} while (nr_sgs++, sg = sg->next, sg != sd->groups);
5358
5359 nr_entries += nr_sgs;
5360 }
5361
5362 table = sd_alloc_ctl_entry(nr_entries);
5363
5364 if (table == NULL)
5365 return NULL;
5366
5367 set_table_entry(&table[0], "min_interval", &sd->min_interval,
5368 sizeof(long), 0644, proc_doulongvec_minmax, false);
5369 set_table_entry(&table[1], "max_interval", &sd->max_interval,
5370 sizeof(long), 0644, proc_doulongvec_minmax, false);
5371 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
5372 sizeof(int), 0644, proc_dointvec_minmax, true);
5373 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
5374 sizeof(int), 0644, proc_dointvec_minmax, true);
5375 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
5376 sizeof(int), 0644, proc_dointvec_minmax, true);
5377 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
5378 sizeof(int), 0644, proc_dointvec_minmax, true);
5379 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
5380 sizeof(int), 0644, proc_dointvec_minmax, true);
5381 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
5382 sizeof(int), 0644, proc_dointvec_minmax, false);
5383 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
5384 sizeof(int), 0644, proc_dointvec_minmax, false);
5385 set_table_entry(&table[9], "cache_nice_tries",
5386 &sd->cache_nice_tries,
5387 sizeof(int), 0644, proc_dointvec_minmax, false);
5388 set_table_entry(&table[10], "flags", &sd->flags,
5389 sizeof(int), 0644, proc_dointvec_minmax, false);
5390 set_table_entry(&table[11], "max_newidle_lb_cost",
5391 &sd->max_newidle_lb_cost,
5392 sizeof(long), 0644, proc_doulongvec_minmax, false);
5393 set_table_entry(&table[12], "name", sd->name,
5394 CORENAME_MAX_SIZE, 0444, proc_dostring, false);
5395 sg = sd->groups;
5396 if (sg->sge) {
5397 char buf[32];
5398 struct ctl_table *entry = &table[13];
5399
5400 do {
5401 snprintf(buf, 32, "group%d", i);
5402 entry->procname = kstrdup(buf, GFP_KERNEL);
5403 entry->mode = 0555;
5404 entry->child = sd_alloc_ctl_group_table(sg);
5405 } while (entry++, i++, sg = sg->next, sg != sd->groups);
5406 }
5407 /* &table[nr_entries-1] is terminator */
5408
5409 return table;
5410 }
5411
5412 static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
5413 {
5414 struct ctl_table *entry, *table;
5415 struct sched_domain *sd;
5416 int domain_num = 0, i;
5417 char buf[32];
5418
5419 for_each_domain(cpu, sd)
5420 domain_num++;
5421 entry = table = sd_alloc_ctl_entry(domain_num + 1);
5422 if (table == NULL)
5423 return NULL;
5424
5425 i = 0;
5426 for_each_domain(cpu, sd) {
5427 snprintf(buf, 32, "domain%d", i);
5428 entry->procname = kstrdup(buf, GFP_KERNEL);
5429 entry->mode = 0555;
5430 entry->child = sd_alloc_ctl_domain_table(sd);
5431 entry++;
5432 i++;
5433 }
5434 return table;
5435 }
5436
5437 static struct ctl_table_header *sd_sysctl_header;
5438 static void register_sched_domain_sysctl(void)
5439 {
5440 int i, cpu_num = num_possible_cpus();
5441 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
5442 char buf[32];
5443
5444 WARN_ON(sd_ctl_dir[0].child);
5445 sd_ctl_dir[0].child = entry;
5446
5447 if (entry == NULL)
5448 return;
5449
5450 for_each_possible_cpu(i) {
5451 snprintf(buf, 32, "cpu%d", i);
5452 entry->procname = kstrdup(buf, GFP_KERNEL);
5453 entry->mode = 0555;
5454 entry->child = sd_alloc_ctl_cpu_table(i);
5455 entry++;
5456 }
5457
5458 WARN_ON(sd_sysctl_header);
5459 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
5460 }
5461
5462 /* may be called multiple times per register */
5463 static void unregister_sched_domain_sysctl(void)
5464 {
5465 if (sd_sysctl_header)
5466 unregister_sysctl_table(sd_sysctl_header);
5467 sd_sysctl_header = NULL;
5468 if (sd_ctl_dir[0].child)
5469 sd_free_ctl_entry(&sd_ctl_dir[0].child);
5470 }
5471 #else
5472 static void register_sched_domain_sysctl(void)
5473 {
5474 }
5475 static void unregister_sched_domain_sysctl(void)
5476 {
5477 }
5478 #endif /* CONFIG_SCHED_DEBUG && CONFIG_SYSCTL */
5479
5480 static void set_rq_online(struct rq *rq)
5481 {
5482 if (!rq->online) {
5483 const struct sched_class *class;
5484
5485 cpumask_set_cpu(rq->cpu, rq->rd->online);
5486 rq->online = 1;
5487
5488 for_each_class(class) {
5489 if (class->rq_online)
5490 class->rq_online(rq);
5491 }
5492 }
5493 }
5494
5495 static void set_rq_offline(struct rq *rq)
5496 {
5497 if (rq->online) {
5498 const struct sched_class *class;
5499
5500 for_each_class(class) {
5501 if (class->rq_offline)
5502 class->rq_offline(rq);
5503 }
5504
5505 cpumask_clear_cpu(rq->cpu, rq->rd->online);
5506 rq->online = 0;
5507 }
5508 }
5509
5510 /*
5511 * migration_call - callback that gets triggered when a CPU is added.
5512 * Here we can start up the necessary migration thread for the new CPU.
5513 */
5514 static int
5515 migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5516 {
5517 int cpu = (long)hcpu;
5518 unsigned long flags;
5519 struct rq *rq = cpu_rq(cpu);
5520
5521 switch (action & ~CPU_TASKS_FROZEN) {
5522
5523 case CPU_UP_PREPARE:
5524 raw_spin_lock_irqsave(&rq->lock, flags);
5525 walt_set_window_start(rq);
5526 raw_spin_unlock_irqrestore(&rq->lock, flags);
5527 rq->calc_load_update = calc_load_update;
5528 break;
5529
5530 case CPU_ONLINE:
5531 /* Update our root-domain */
5532 raw_spin_lock_irqsave(&rq->lock, flags);
5533 if (rq->rd) {
5534 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5535
5536 set_rq_online(rq);
5537 }
5538 raw_spin_unlock_irqrestore(&rq->lock, flags);
5539 break;
5540
5541 #ifdef CONFIG_HOTPLUG_CPU
5542 case CPU_DYING:
5543 sched_ttwu_pending();
5544 /* Update our root-domain */
5545 raw_spin_lock_irqsave(&rq->lock, flags);
5546 walt_migrate_sync_cpu(cpu);
5547 if (rq->rd) {
5548 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5549 set_rq_offline(rq);
5550 }
5551 migrate_tasks(cpu);
5552 BUG_ON(rq->nr_running != 1); /* the migration thread */
5553 raw_spin_unlock_irqrestore(&rq->lock, flags);
5554 break;
5555
5556 case CPU_DEAD:
5557 calc_load_migrate(rq);
5558 break;
5559 #endif
5560 }
5561
5562 update_max_interval();
5563
5564 return NOTIFY_OK;
5565 }
5566
5567 /*
5568 * Register at high priority so that task migration (migrate_all_tasks)
5569 * happens before everything else. This has to be lower priority than
5570 * the notifier in the perf_event subsystem, though.
5571 */
5572 static struct notifier_block migration_notifier = {
5573 .notifier_call = migration_call,
5574 .priority = CPU_PRI_MIGRATION,
5575 };
5576
5577 static void __cpuinit set_cpu_rq_start_time(void)
5578 {
5579 int cpu = smp_processor_id();
5580 struct rq *rq = cpu_rq(cpu);
5581 rq->age_stamp = sched_clock_cpu(cpu);
5582 }
5583
5584 static int sched_cpu_active(struct notifier_block *nfb,
5585 unsigned long action, void *hcpu)
5586 {
5587 switch (action & ~CPU_TASKS_FROZEN) {
5588 case CPU_STARTING:
5589 set_cpu_rq_start_time();
5590 return NOTIFY_OK;
5591 case CPU_ONLINE:
5592 /*
5593 * At this point a starting CPU has marked itself as online via
5594 * set_cpu_online(). But it might not yet have marked itself
5595 * as active, which is essential from here on.
5596 *
5597 * Thus, fall-through and help the starting CPU along.
5598 */
5599 case CPU_DOWN_FAILED:
5600 set_cpu_active((long)hcpu, true);
5601 return NOTIFY_OK;
5602 default:
5603 return NOTIFY_DONE;
5604 }
5605 }
5606
5607 static int sched_cpu_inactive(struct notifier_block *nfb,
5608 unsigned long action, void *hcpu)
5609 {
5610 unsigned long flags;
5611 long cpu = (long)hcpu;
5612 struct dl_bw *dl_b;
5613
5614 switch (action & ~CPU_TASKS_FROZEN) {
5615 case CPU_DOWN_PREPARE:
5616 set_cpu_active(cpu, false);
5617
5618 /* explicitly allow suspend */
5619 if (!(action & CPU_TASKS_FROZEN)) {
5620 bool overflow;
5621 int cpus;
5622
5623 rcu_read_lock_sched();
5624 dl_b = dl_bw_of(cpu);
5625
5626 raw_spin_lock_irqsave(&dl_b->lock, flags);
5627 cpus = dl_bw_cpus(cpu);
5628 overflow = __dl_overflow(dl_b, cpus, 0, 0);
5629 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
5630
5631 rcu_read_unlock_sched();
5632
5633 if (overflow)
5634 return notifier_from_errno(-EBUSY);
5635 }
5636 return NOTIFY_OK;
5637 }
5638
5639 return NOTIFY_DONE;
5640 }
5641
5642 static int __init migration_init(void)
5643 {
5644 void *cpu = (void *)(long)smp_processor_id();
5645 int err;
5646
5647 /* Initialize migration for the boot CPU */
5648 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5649 BUG_ON(err == NOTIFY_BAD);
5650 migration_call(&migration_notifier, CPU_ONLINE, cpu);
5651 register_cpu_notifier(&migration_notifier);
5652
5653 /* Register cpu active notifiers */
5654 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
5655 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
5656
5657 return 0;
5658 }
5659 early_initcall(migration_init);
5660
5661 static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5662
5663 #ifdef CONFIG_SCHED_DEBUG
5664
5665 static __read_mostly int sched_debug_enabled;
5666
5667 static int __init sched_debug_setup(char *str)
5668 {
5669 sched_debug_enabled = 1;
5670
5671 return 0;
5672 }
5673 early_param("sched_debug", sched_debug_setup);
5674
5675 static inline bool sched_debug(void)
5676 {
5677 return sched_debug_enabled;
5678 }
5679
5680 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
5681 struct cpumask *groupmask)
5682 {
5683 struct sched_group *group = sd->groups;
5684 char str[256];
5685
5686 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
5687 cpumask_clear(groupmask);
5688
5689 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5690
5691 if (!(sd->flags & SD_LOAD_BALANCE)) {
5692 printk("does not load-balance\n");
5693 if (sd->parent)
5694 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5695 " has parent");
5696 return -1;
5697 }
5698
5699 printk(KERN_CONT "span %s level %s\n", str, sd->name);
5700
5701 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
5702 printk(KERN_ERR "ERROR: domain->span does not contain "
5703 "CPU%d\n", cpu);
5704 }
5705 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
5706 printk(KERN_ERR "ERROR: domain->groups does not contain"
5707 " CPU%d\n", cpu);
5708 }
5709
5710 printk(KERN_DEBUG "%*s groups:", level + 1, "");
5711 do {
5712 if (!group) {
5713 printk("\n");
5714 printk(KERN_ERR "ERROR: group is NULL\n");
5715 break;
5716 }
5717
5718 if (!cpumask_weight(sched_group_cpus(group))) {
5719 printk(KERN_CONT "\n");
5720 printk(KERN_ERR "ERROR: empty group\n");
5721 break;
5722 }
5723
5724 if (!(sd->flags & SD_OVERLAP) &&
5725 cpumask_intersects(groupmask, sched_group_cpus(group))) {
5726 printk(KERN_CONT "\n");
5727 printk(KERN_ERR "ERROR: repeated CPUs\n");
5728 break;
5729 }
5730
5731 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
5732
5733 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
5734
5735 printk(KERN_CONT " %s", str);
5736 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
5737 printk(KERN_CONT " (cpu_capacity = %lu)",
5738 group->sgc->capacity);
5739 }
5740
5741 group = group->next;
5742 } while (group != sd->groups);
5743 printk(KERN_CONT "\n");
5744
5745 if (!cpumask_equal(sched_domain_span(sd), groupmask))
5746 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
5747
5748 if (sd->parent &&
5749 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
5750 printk(KERN_ERR "ERROR: parent span is not a superset "
5751 "of domain->span\n");
5752 return 0;
5753 }
5754
static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;

	if (!sched_debug_enabled)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);

	for (;;) {
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
			break;
		level++;
		sd = sd->parent;
		if (!sd)
			break;
	}
}
#else /* !CONFIG_SCHED_DEBUG */
# define sched_domain_debug(sd, cpu) do { } while (0)
static inline bool sched_debug(void)
{
	return false;
}
#endif /* CONFIG_SCHED_DEBUG */

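/*
 * A sched domain is "degenerate" when it cannot affect scheduling
 * decisions: it spans a single CPU, or none of its flags require either
 * multiple groups or the domain itself (e.g. SD_WAKE_AFFINE). Degenerate
 * domains are pruned from the domain tree in cpu_attach_domain().
 */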
static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* Following flags need at least 2 groups */
	if (sd->flags & (SD_LOAD_BALANCE |
			 SD_BALANCE_NEWIDLE |
			 SD_BALANCE_FORK |
			 SD_BALANCE_EXEC |
			 SD_SHARE_CPUCAPACITY |
			 SD_SHARE_PKG_RESOURCES |
			 SD_SHARE_POWERDOMAIN |
			 SD_SHARE_CAP_STATES)) {
		if (sd->groups != sd->groups->next)
			return 0;
	}

	/* Following flags don't use groups */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}

static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if only 1 group in parent */
	if (parent->groups == parent->groups->next) {
		pflags &= ~(SD_LOAD_BALANCE |
			    SD_BALANCE_NEWIDLE |
			    SD_BALANCE_FORK |
			    SD_BALANCE_EXEC |
			    SD_SHARE_CPUCAPACITY |
			    SD_SHARE_PKG_RESOURCES |
			    SD_PREFER_SIBLING |
			    SD_SHARE_POWERDOMAIN |
			    SD_SHARE_CAP_STATES);
		if (nr_node_ids == 1)
			pflags &= ~SD_SERIALIZE;
	}
	if (~cflags & pflags)
		return 0;

	return 1;
}

static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	cpudl_cleanup(&rd->cpudl);
	free_cpumask_var(rd->dlo_mask);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	kfree(rd);
}

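/*
 * Attach @rq to root domain @rd, replacing any previous root domain.
 * The runqueue is taken offline in the old domain and brought online in
 * the new one if its CPU is active; the old root domain, once no longer
 * referenced, is freed after an RCU grace period.
 */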
static void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}

static int init_rootdomain(struct root_domain *rd)
{
	memset(rd, 0, sizeof(*rd));

	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_dlo_mask;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_rto_mask;

	init_max_cpu_capacity(&rd->max_cpu_capacity);
	return 0;

free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
struct root_domain def_root_domain;

static void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kmalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}

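/*
 * Walk the circular ->next list of sched groups and free each group,
 * dropping the shared sched_group_capacity only when @free_sgc is set
 * and the last reference goes away.
 */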
static void free_sched_groups(struct sched_group *sg, int free_sgc)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		tmp = sg->next;

		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
			kfree(sg->sgc);

		kfree(sg);
		sg = tmp;
	} while (sg != first);
}

static void free_sched_domain(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	/*
	 * If it's an overlapping domain it has private groups; iterate and
	 * nuke them all.
	 */
	if (sd->flags & SD_OVERLAP) {
		free_sched_groups(sd->groups, 1);
	} else if (atomic_dec_and_test(&sd->groups->ref)) {
		kfree(sd->groups->sgc);
		kfree(sd->groups);
	}
	kfree(sd);
}

static void destroy_sched_domain(struct sched_domain *sd, int cpu)
{
	call_rcu(&sd->rcu, free_sched_domain);
}

static void destroy_sched_domains(struct sched_domain *sd, int cpu)
{
	for (; sd; sd = sd->parent)
		destroy_sched_domain(sd, cpu);
}

/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this allows us
 * to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first cpu number in
 * the cpumask of the domain), this allows us to quickly tell if
 * two cpus are in the same cache domain, see cpus_share_cache().
 */
DEFINE_PER_CPU(struct sched_domain *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain *, sd_numa);
DEFINE_PER_CPU(struct sched_domain *, sd_busy);
DEFINE_PER_CPU(struct sched_domain *, sd_asym);
DEFINE_PER_CPU(struct sched_domain *, sd_ea);
DEFINE_PER_CPU(struct sched_domain *, sd_scs);

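/*
 * Re-derive the cached per-cpu domain pointers above after the domain
 * tree of @cpu has changed. sd_ea caches the highest domain that has
 * energy data attached and sd_scs the highest domain whose CPUs share
 * capacity states, for use by the energy-aware scheduling code.
 */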
static void update_top_cache_domain(int cpu)
{
	struct sched_domain *sd;
	struct sched_domain *busy_sd = NULL, *ea_sd = NULL;
	int id = cpu;
	int size = 1;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd) {
		id = cpumask_first(sched_domain_span(sd));
		size = cpumask_weight(sched_domain_span(sd));
		busy_sd = sd->parent; /* sd_busy */
	}
	rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_size, cpu) = size;
	per_cpu(sd_llc_id, cpu) = id;

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);

	for_each_domain(cpu, sd) {
		if (sd->groups->sge)
			ea_sd = sd;
		else
			break;
	}
	rcu_assign_pointer(per_cpu(sd_ea, cpu), ea_sd);

	sd = highest_flag_domain(cpu, SD_SHARE_CAP_STATES);
	rcu_assign_pointer(per_cpu(sd_scs, cpu), sd);
}

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
			if (parent->flags & SD_PREFER_SIBLING)
				tmp->flags |= SD_PREFER_SIBLING;
			destroy_sched_domain(parent, cpu);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp, cpu);
		if (sd)
			sd->child = NULL;
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	destroy_sched_domains(tmp, cpu);

	update_top_cache_domain(cpu);
}

/* cpus with isolated domains */
static cpumask_var_t cpu_isolated_map;

/* Set up the mask of cpus configured for isolated domains */
static int __init isolated_cpu_setup(char *str)
{
	alloc_bootmem_cpumask_var(&cpu_isolated_map);
	cpulist_parse(str, cpu_isolated_map);
	return 1;
}

__setup("isolcpus=", isolated_cpu_setup);

struct s_data {
	struct sched_domain ** __percpu sd;
	struct root_domain	*rd;
};

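/*
 * Allocation progress for build_sched_domains(); __free_domain_allocs()
 * falls through from the last state reached, so a partial failure
 * unwinds exactly what was already set up.
 */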
enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

/*
 * Build an iteration mask that can exclude certain CPUs from the upwards
 * domain traversal.
 *
 * Only CPUs that can arrive at this group should be considered to continue
 * balancing.
 *
 * Asymmetric node setups can result in situations where the domain tree is
 * of unequal depth; make sure to skip domains that already cover the entire
 * range.
 *
 * In that case build_sched_domains() will have terminated the iteration
 * early and our sibling sd spans will be empty. Domains should always
 * include the cpu they're built on, so check that.
 */
static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
{
	const struct cpumask *sg_span = sched_group_cpus(sg);
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	for_each_cpu(i, sg_span) {
		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Can happen in the asymmetric case, where these siblings are
		 * unused. The mask will not be empty because those CPUs that
		 * do have the top domain _should_ span the domain.
		 */
		if (!sibling->child)
			continue;

		/* If we would not end up here, we can't continue from here */
		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
			continue;

		cpumask_set_cpu(i, sched_group_mask(sg));
	}

	/* We must not have empty masks here */
	WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
}

/*
 * Return the canonical balance cpu for this group, this is the first cpu
 * of this group that's also in the iteration mask.
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
}

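/*
 * Build the groups for an SD_OVERLAP domain: one group per covered child
 * span, each with its own iteration mask set up by build_group_mask().
 * Unlike build_sched_groups(), these groups are private to the domain
 * and are freed together with it.
 */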
static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct cpumask *sg_span;

		if (cpumask_test_cpu(i, covered))
			continue;

		sibling = *per_cpu_ptr(sdd->sd, i);

		/* See the comment near build_group_mask(). */
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
				GFP_KERNEL, cpu_to_node(cpu));

		if (!sg)
			goto fail;

		sg_span = sched_group_cpus(sg);
		if (sibling->child)
			cpumask_copy(sg_span, sched_domain_span(sibling->child));
		else
			cpumask_set_cpu(i, sg_span);

		cpumask_or(covered, covered, sg_span);

		sg->sgc = *per_cpu_ptr(sdd->sgc, i);
		if (atomic_inc_return(&sg->sgc->ref) == 1)
			build_group_mask(sd, sg);

		/*
		 * Initialize sgc->capacity such that even if we mess up the
		 * domains and no possible iteration will get us here, we won't
		 * die on a /0 trap.
		 */
		sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
		sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;

		/*
		 * Make sure the first group of this domain contains the
		 * canonical balance cpu. Otherwise the sched_domain iteration
		 * breaks. See update_sg_lb_stats().
		 */
		if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
		    group_balance_cpu(sg) == cpu)
			groups = sg;

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = groups;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}

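/*
 * Map @cpu to its group representative: the first CPU in the child
 * domain's span, or @cpu itself at the bottom level. If @sg is non-NULL,
 * also return the sched_group/sched_group_capacity pair stored for that
 * representative in the per-cpu sd_data.
 */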
static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
{
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	struct sched_domain *child = sd->child;

	if (child)
		cpu = cpumask_first(sched_domain_span(child));

	if (sg) {
		*sg = *per_cpu_ptr(sdd->sg, cpu);
		(*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
		atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
	}

	return cpu;
}


/*
 * build_sched_groups will build a circular linked list of the groups
 * covered by the given span, and will set each group's ->cpumask correctly,
 * and ->cpu_capacity to 0.
 *
 * Assumes the sched_domain tree is fully constructed.
 */
static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL;
	struct sd_data *sdd = sd->private;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered;
	int i;

	get_group(cpu, sdd, &sd->groups);
	atomic_inc(&sd->groups->ref);

	if (cpu != cpumask_first(span))
		return 0;

	lockdep_assert_held(&sched_domains_mutex);
	covered = sched_domains_tmpmask;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct sched_group *sg;
		int group, j;

		if (cpumask_test_cpu(i, covered))
			continue;

		group = get_group(i, sdd, &sg);
		cpumask_setall(sched_group_mask(sg));

		for_each_cpu(j, span) {
			if (get_group(j, sdd, NULL) != group)
				continue;

			cpumask_set_cpu(j, covered);
			cpumask_set_cpu(j, sched_group_cpus(sg));
		}

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	last->next = first;

	return 0;
}


/*
 * Initialize sched groups cpu_capacity.
 *
 * cpu_capacity indicates the capacity of a sched group, which is used
 * while distributing the load between different sched groups in a sched
 * domain. Typically cpu_capacity for all the groups in a sched domain
 * will be the same unless there are asymmetries in the topology. If
 * there are asymmetries, the group having more cpu_capacity will pick
 * up more load compared to the group having less cpu_capacity.
 */
static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;

	WARN_ON(!sg);

	do {
		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
		sg = sg->next;
	} while (sg != sd->groups);

	if (cpu != group_balance_cpu(sg))
		return;

	update_group_capacity(sd, cpu);
	atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
}


/*
 * Check that the per-cpu provided sd energy data is consistent for all cpus
 * within the mask.
 */
static inline void check_sched_energy_data(int cpu, sched_domain_energy_f fn,
					   const struct cpumask *cpumask)
{
	const struct sched_group_energy * const sge = fn(cpu);
	struct cpumask mask;
	int i;

	if (cpumask_weight(cpumask) <= 1)
		return;

	cpumask_xor(&mask, cpumask, get_cpu_mask(cpu));

	for_each_cpu(i, &mask) {
		const struct sched_group_energy * const e = fn(i);
		int y;

		BUG_ON(e->nr_idle_states != sge->nr_idle_states);

		for (y = 0; y < (e->nr_idle_states); y++) {
			BUG_ON(e->idle_states[y].power !=
			       sge->idle_states[y].power);
		}

		BUG_ON(e->nr_cap_states != sge->nr_cap_states);

		for (y = 0; y < (e->nr_cap_states); y++) {
			BUG_ON(e->cap_states[y].cap != sge->cap_states[y].cap);
			BUG_ON(e->cap_states[y].power !=
			       sge->cap_states[y].power);
		}
	}
}


static void init_sched_energy(int cpu, struct sched_domain *sd,
			      sched_domain_energy_f fn)
{
	if (!(fn && fn(cpu)))
		return;

	if (cpu != group_balance_cpu(sd->groups))
		return;

	if (sd->child && !sd->child->groups->sge) {
		pr_err("BUG: EAS setup broken for CPU%d\n", cpu);
#ifdef CONFIG_SCHED_DEBUG
		pr_err(" energy data on %s but not on %s domain\n",
			sd->name, sd->child->name);
#endif
		return;
	}

	check_sched_energy_data(cpu, fn, sched_group_cpus(sd->groups));

	sd->groups->sge = fn(cpu);
}

/*
 * Initializers for schedule domains
 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
 */

static int default_relax_domain_level = -1;
int sched_domain_level_max;

static int __init setup_relax_domain_level(char *str)
{
	if (kstrtoint(str, 0, &default_relax_domain_level))
		pr_warn("Unable to set relax_domain_level\n");

	return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);

static void set_domain_attribute(struct sched_domain *sd,
				 struct sched_domain_attr *attr)
{
	int request;

	if (!attr || attr->relax_domain_level < 0) {
		if (default_relax_domain_level < 0)
			return;
		else
			request = default_relax_domain_level;
	} else
		request = attr->relax_domain_level;
	if (request < sd->level) {
		/* turn off idle balance on this domain */
		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	} else {
		/* turn on idle balance on this domain */
		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	}
}

static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);

static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
				 const struct cpumask *cpu_map)
{
	switch (what) {
	case sa_rootdomain:
		if (!atomic_read(&d->rd->refcount))
			free_rootdomain(&d->rd->rcu); /* fall through */
	case sa_sd:
		free_percpu(d->sd); /* fall through */
	case sa_sd_storage:
		__sdt_free(cpu_map); /* fall through */
	case sa_none:
		break;
	}
}

static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
						   const struct cpumask *cpu_map)
{
	memset(d, 0, sizeof(*d));

	if (__sdt_alloc(cpu_map))
		return sa_sd_storage;
	d->sd = alloc_percpu(struct sched_domain *);
	if (!d->sd)
		return sa_sd_storage;
	d->rd = alloc_rootdomain();
	if (!d->rd)
		return sa_sd;
	return sa_rootdomain;
}

/*
 * NULL the sd_data elements we've used to build the sched_domain and
 * sched_group structure so that the subsequent __free_domain_allocs()
 * will not free the data we're using.
 */
static void claim_allocations(int cpu, struct sched_domain *sd)
{
	struct sd_data *sdd = sd->private;

	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
	*per_cpu_ptr(sdd->sd, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
		*per_cpu_ptr(sdd->sg, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
}

#ifdef CONFIG_NUMA
static int sched_domains_numa_levels;
static int *sched_domains_numa_distance;
static struct cpumask ***sched_domains_numa_masks;
static int sched_domains_curr_level;
#endif

/*
 * SD_flags allowed in topology descriptions.
 *
 * SD_SHARE_CPUCAPACITY   - describes SMT topologies
 * SD_SHARE_PKG_RESOURCES - describes shared caches
 * SD_NUMA                - describes NUMA topologies
 * SD_SHARE_POWERDOMAIN   - describes shared power domain
 * SD_SHARE_CAP_STATES    - describes shared capacity states
 *
 * Odd one out:
 * SD_ASYM_PACKING        - describes SMT quirks
 */
#define TOPOLOGY_SD_FLAGS		\
	(SD_SHARE_CPUCAPACITY |		\
	 SD_SHARE_PKG_RESOURCES |	\
	 SD_NUMA |			\
	 SD_ASYM_PACKING |		\
	 SD_SHARE_POWERDOMAIN |		\
	 SD_SHARE_CAP_STATES)

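/*
 * Fill in the sched_domain for topology level @tl on @cpu: start from
 * conservative defaults, fold in the level's topology flags, then tune
 * the balancing knobs (imbalance_pct, cache_nice_tries, busy/idle
 * indexes) depending on whether the level shares CPU capacity (SMT),
 * package resources (caches) or is a NUMA level.
 */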
static struct sched_domain *
sd_init(struct sched_domain_topology_level *tl, int cpu)
{
	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
	int sd_weight, sd_flags = 0;

#ifdef CONFIG_NUMA
	/*
	 * Ugly hack to pass state to sd_numa_mask()...
	 */
	sched_domains_curr_level = tl->numa_level;
#endif

	sd_weight = cpumask_weight(tl->mask(cpu));

	if (tl->sd_flags)
		sd_flags = (*tl->sd_flags)();
	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
			"wrong sd_flags in topology description\n"))
		sd_flags &= ~TOPOLOGY_SD_FLAGS;

	*sd = (struct sched_domain){
		.min_interval		= sd_weight,
		.max_interval		= 2*sd_weight,
		.busy_factor		= 32,
		.imbalance_pct		= 125,

		.cache_nice_tries	= 0,
		.busy_idx		= 0,
		.idle_idx		= 0,
		.newidle_idx		= 0,
		.wake_idx		= 0,
		.forkexec_idx		= 0,

		.flags			= 1*SD_LOAD_BALANCE
					| 1*SD_BALANCE_NEWIDLE
					| 1*SD_BALANCE_EXEC
					| 1*SD_BALANCE_FORK
					| 0*SD_BALANCE_WAKE
					| 1*SD_WAKE_AFFINE
					| 0*SD_SHARE_CPUCAPACITY
					| 0*SD_SHARE_PKG_RESOURCES
					| 0*SD_SERIALIZE
					| 0*SD_PREFER_SIBLING
					| 0*SD_NUMA
					| sd_flags
					,

		.last_balance		= jiffies,
		.balance_interval	= sd_weight,
		.smt_gain		= 0,
		.max_newidle_lb_cost	= 0,
		.next_decay_max_lb_cost	= jiffies,
#ifdef CONFIG_SCHED_DEBUG
		.name			= tl->name,
#endif
	};

	/*
	 * Convert topological properties into behaviour.
	 */

	if (sd->flags & SD_SHARE_CPUCAPACITY) {
		sd->flags |= SD_PREFER_SIBLING;
		sd->imbalance_pct = 110;
		sd->smt_gain = 1178; /* ~15% */

	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
		sd->imbalance_pct = 117;
		sd->cache_nice_tries = 1;
		sd->busy_idx = 2;

#ifdef CONFIG_NUMA
	} else if (sd->flags & SD_NUMA) {
		sd->cache_nice_tries = 2;
		sd->busy_idx = 3;
		sd->idle_idx = 2;

		sd->flags |= SD_SERIALIZE;
		if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
			sd->flags &= ~(SD_BALANCE_EXEC |
				       SD_BALANCE_FORK |
				       SD_WAKE_AFFINE);
		}

#endif
	} else {
		sd->flags |= SD_PREFER_SIBLING;
		sd->cache_nice_tries = 1;
		sd->busy_idx = 2;
		sd->idle_idx = 1;
	}

	sd->private = &tl->data;

	return sd;
}


/*
 * Topology list, bottom-up.
 */
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

struct sched_domain_topology_level *sched_domain_topology = default_topology;

#define for_each_sd_topology(tl)			\
	for (tl = sched_domain_topology; tl->mask; tl++)

void set_sched_topology(struct sched_domain_topology_level *tl)
{
	sched_domain_topology = tl;
}

#ifdef CONFIG_NUMA

static const struct cpumask *sd_numa_mask(int cpu)
{
	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
}

static void sched_numa_warn(const char *str)
{
	static bool done = false;
	int i, j;

	if (done)
		return;

	done = true;

	printk(KERN_WARNING "ERROR: %s\n\n", str);

	for (i = 0; i < nr_node_ids; i++) {
		printk(KERN_WARNING " ");
		for (j = 0; j < nr_node_ids; j++)
			printk(KERN_CONT "%02d ", node_distance(i, j));
		printk(KERN_CONT "\n");
	}
	printk(KERN_WARNING "\n");
}

static bool find_numa_distance(int distance)
{
	int i;

	if (distance == node_distance(0, 0))
		return true;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		if (sched_domains_numa_distance[i] == distance)
			return true;
	}

	return false;
}

static void sched_init_numa(void)
{
	int next_distance, curr_distance = node_distance(0, 0);
	struct sched_domain_topology_level *tl;
	int level = 0;
	int i, j, k;

	sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
	if (!sched_domains_numa_distance)
		return;

	/*
	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
	 * unique distances in the node_distance() table.
	 *
	 * Assumes node_distance(0,j) includes all distances in
	 * node_distance(i,j) in order to avoid cubic time.
	 */
	next_distance = curr_distance;
	for (i = 0; i < nr_node_ids; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			for (k = 0; k < nr_node_ids; k++) {
				int distance = node_distance(i, k);

				if (distance > curr_distance &&
				    (distance < next_distance ||
				     next_distance == curr_distance))
					next_distance = distance;

				/*
				 * While not a strong assumption it would be
				 * nice to know about cases where node A is
				 * connected to B but B is not equally
				 * connected back to A.
				 */
				if (sched_debug() && node_distance(k, i) != distance)
					sched_numa_warn("Node-distance not symmetric");

				if (sched_debug() && i && !find_numa_distance(distance))
					sched_numa_warn("Node-0 not representative");
			}
			if (next_distance != curr_distance) {
				sched_domains_numa_distance[level++] = next_distance;
				sched_domains_numa_levels = level;
				curr_distance = next_distance;
			} else break;
		}

		/*
		 * In case of sched_debug() we verify the above assumption.
		 */
		if (!sched_debug())
			break;
	}

	if (!level)
		return;

	/*
	 * 'level' contains the number of unique distances, excluding the
	 * identity distance node_distance(i,i).
	 *
	 * The sched_domains_numa_distance[] array includes the actual distance
	 * numbers.
	 */

	/*
	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
	 * If we fail to allocate memory for the sched_domains_numa_masks[][]
	 * array, it will contain fewer than 'level' members. That would be
	 * dangerous for code that iterates the array in other functions.
	 *
	 * We reset it to 'level' at the end of this function.
	 */
	sched_domains_numa_levels = 0;

	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
	if (!sched_domains_numa_masks)
		return;

	/*
	 * Now for each level, construct a mask per node which contains all
	 * cpus of nodes that are that many hops away from us.
	 */
	for (i = 0; i < level; i++) {
		sched_domains_numa_masks[i] =
			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
		if (!sched_domains_numa_masks[i])
			return;

		for (j = 0; j < nr_node_ids; j++) {
			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
			if (!mask)
				return;

			sched_domains_numa_masks[i][j] = mask;

			for_each_node(k) {
				if (node_distance(j, k) > sched_domains_numa_distance[i])
					continue;

				cpumask_or(mask, mask, cpumask_of_node(k));
			}
		}
	}

	/* Compute default topology size */
	for (i = 0; sched_domain_topology[i].mask; i++);

	tl = kzalloc((i + level + 1) *
			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
	if (!tl)
		return;

	/*
	 * Copy the default topology bits..
	 */
	for (i = 0; sched_domain_topology[i].mask; i++)
		tl[i] = sched_domain_topology[i];

	/*
	 * .. and append 'j' levels of NUMA goodness.
	 */
	for (j = 0; j < level; i++, j++) {
		tl[i] = (struct sched_domain_topology_level){
			.mask = sd_numa_mask,
			.sd_flags = cpu_numa_flags,
			.flags = SDTL_OVERLAP,
			.numa_level = j,
			SD_INIT_NAME(NUMA)
		};
	}

	sched_domain_topology = tl;

	sched_domains_numa_levels = level;
}


static void sched_domains_numa_masks_set(int cpu)
{
	int i, j;
	int node = cpu_to_node(cpu);

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			if (node_distance(j, node) <= sched_domains_numa_distance[i])
				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
		}
	}
}

static void sched_domains_numa_masks_clear(int cpu)
{
	int i, j;
	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++)
			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
	}
}

/*
 * Update sched_domains_numa_masks[level][node] array when new cpus
 * are onlined.
 */
static int sched_domains_numa_masks_update(struct notifier_block *nfb,
					   unsigned long action,
					   void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		sched_domains_numa_masks_set(cpu);
		break;

	case CPU_DEAD:
		sched_domains_numa_masks_clear(cpu);
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
#else
static inline void sched_init_numa(void)
{
}

static int sched_domains_numa_masks_update(struct notifier_block *nfb,
					   unsigned long action,
					   void *hcpu)
{
	return 0;
}
#endif /* CONFIG_NUMA */

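/*
 * Allocate the per-topology-level, per-cpu storage (sched_domain,
 * sched_group and sched_group_capacity) that build_sched_domains() later
 * wires together. claim_allocations() NULLs the entries that end up
 * used, so __sdt_free() only releases the leftovers.
 */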
static int __sdt_alloc(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		sdd->sd = alloc_percpu(struct sched_domain *);
		if (!sdd->sd)
			return -ENOMEM;

		sdd->sg = alloc_percpu(struct sched_group *);
		if (!sdd->sg)
			return -ENOMEM;

		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
		if (!sdd->sgc)
			return -ENOMEM;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;
			struct sched_group *sg;
			struct sched_group_capacity *sgc;

			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sd)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sd, j) = sd;

			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sg)
				return -ENOMEM;

			sg->next = sg;

			*per_cpu_ptr(sdd->sg, j) = sg;

			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sgc)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sgc, j) = sgc;
		}
	}

	return 0;
}

static void __sdt_free(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;

			if (sdd->sd) {
				sd = *per_cpu_ptr(sdd->sd, j);
				if (sd && (sd->flags & SD_OVERLAP))
					free_sched_groups(sd->groups, 0);
				kfree(*per_cpu_ptr(sdd->sd, j));
			}

			if (sdd->sg)
				kfree(*per_cpu_ptr(sdd->sg, j));
			if (sdd->sgc)
				kfree(*per_cpu_ptr(sdd->sgc, j));
		}
		free_percpu(sdd->sd);
		sdd->sd = NULL;
		free_percpu(sdd->sg);
		sdd->sg = NULL;
		free_percpu(sdd->sgc);
		sdd->sgc = NULL;
	}
}


struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
		struct sched_domain *child, int cpu)
{
	struct sched_domain *sd = sd_init(tl, cpu);
	if (!sd)
		return child;

	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
	if (child) {
		sd->level = child->level + 1;
		sched_domain_level_max = max(sched_domain_level_max, sd->level);
		child->parent = sd;
		sd->child = child;

		if (!cpumask_subset(sched_domain_span(child),
				    sched_domain_span(sd))) {
			pr_err("BUG: arch topology borken\n");
#ifdef CONFIG_SCHED_DEBUG
			pr_err(" the %s domain not a subset of the %s domain\n",
					child->name, sd->name);
#endif
			/* Fixup, ensure @sd has at least @child cpus. */
			cpumask_or(sched_domain_span(sd),
				   sched_domain_span(sd),
				   sched_domain_span(child));
		}

	}
	set_domain_attribute(sd, attr);

	return sd;
}

/*
 * Build sched domains for a given set of cpus and attach the sched domains
 * to the individual cpus.
 */
static int build_sched_domains(const struct cpumask *cpu_map,
			       struct sched_domain_attr *attr)
{
	enum s_alloc alloc_state;
	struct sched_domain *sd;
	struct s_data d;
	struct rq *rq = NULL;
	int i, ret = -ENOMEM;

	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
	if (alloc_state != sa_rootdomain)
		goto error;

	/* Set up domains for cpus specified by the cpu_map. */
	for_each_cpu(i, cpu_map) {
		struct sched_domain_topology_level *tl;

		sd = NULL;
		for_each_sd_topology(tl) {
			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
			if (tl == sched_domain_topology)
				*per_cpu_ptr(d.sd, i) = sd;
			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
				sd->flags |= SD_OVERLAP;
			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
				break;
		}
	}

	/* Build the groups for the domains */
	for_each_cpu(i, cpu_map) {
		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			sd->span_weight = cpumask_weight(sched_domain_span(sd));
			if (sd->flags & SD_OVERLAP) {
				if (build_overlap_sched_groups(sd, i))
					goto error;
			} else {
				if (build_sched_groups(sd, i))
					goto error;
			}
		}
	}

	/* Calculate CPU capacity for physical packages and nodes */
	for (i = nr_cpumask_bits-1; i >= 0; i--) {
		struct sched_domain_topology_level *tl = sched_domain_topology;

		if (!cpumask_test_cpu(i, cpu_map))
			continue;

		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent, tl++) {
			init_sched_energy(i, sd, tl->energy);
			claim_allocations(i, sd);
			init_sched_groups_capacity(i, sd);
		}
	}

	/* Attach the domains */
	rcu_read_lock();
	for_each_cpu(i, cpu_map) {
		rq = cpu_rq(i);
		sd = *per_cpu_ptr(d.sd, i);
		cpu_attach_domain(sd, d.rd, i);
	}
	rcu_read_unlock();

	ret = 0;
error:
	__free_domain_allocs(&d, alloc_state, cpu_map);
	return ret;
}

static cpumask_var_t *doms_cur;	/* current sched domains */
static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;
				/* attributes of custom domains in 'doms_cur' */

/*
 * Special case: If a kmalloc of a doms_cur partition (array of
 * cpumask) fails, then fall back to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
static cpumask_var_t fallback_doms;

/*
 * arch_update_cpu_topology lets virtualized architectures update the
 * cpu core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
int __weak arch_update_cpu_topology(void)
{
	return 0;
}

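/* Allocate an array of @ndoms cpumasks describing sched domain partitions. */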
cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
	int i;
	cpumask_var_t *doms;

	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
	if (!doms)
		return NULL;
	for (i = 0; i < ndoms; i++) {
		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
			free_sched_domains(doms, i);
			return NULL;
		}
	}
	return doms;
}

void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
	unsigned int i;
	for (i = 0; i < ndoms; i++)
		free_cpumask_var(doms[i]);
	kfree(doms);
}

/*
 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
 * For now this just excludes isolated cpus, but could be used to
 * exclude other special cases in the future.
 */
static int init_sched_domains(const struct cpumask *cpu_map)
{
	int err;

	arch_update_cpu_topology();
	ndoms_cur = 1;
	doms_cur = alloc_sched_domains(ndoms_cur);
	if (!doms_cur)
		doms_cur = &fallback_doms;
	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
	err = build_sched_domains(doms_cur[0], NULL);
	register_sched_domain_sysctl();

	return err;
}

/*
 * Detach sched domains from a group of cpus specified in cpu_map.
 * These cpus will now be attached to the NULL domain.
 */
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
	int i;

	rcu_read_lock();
	for_each_cpu(i, cpu_map)
		cpu_attach_domain(NULL, &def_root_domain, i);
	rcu_read_unlock();
}

/* handle null as "default" */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
			struct sched_domain_attr *new, int idx_new)
{
	struct sched_domain_attr tmp;

	/* fast path */
	if (!new && !cur)
		return 1;

	tmp = SD_ATTR_INIT;
	return !memcmp(cur ? (cur + idx_cur) : &tmp,
			new ? (new + idx_new) : &tmp,
			sizeof(struct sched_domain_attr));
}


/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap); we should set up one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
 * current 'doms_cur' domains and in the new 'doms_new', we can leave
 * it as it is.
 *
 * The passed in 'doms_new' should be allocated using
 * alloc_sched_domains. This routine takes ownership of it and will
 * free_sched_domains it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fall back to the single partition
 * 'fallback_doms'; it also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
 * and it will not create the default domain.
 *
 * Call with hotplug lock held.
 */
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			     struct sched_domain_attr *dattr_new)
{
	int i, j, n;
	int new_topology;

	mutex_lock(&sched_domains_mutex);

	/* always unregister in case we don't destroy any domains */
	unregister_sched_domain_sysctl();

	/* Let architecture update cpu core mappings. */
	new_topology = arch_update_cpu_topology();

	n = doms_new ? ndoms_new : 0;

	/* Destroy deleted domains */
	for (i = 0; i < ndoms_cur; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_cur[i], doms_new[j])
			    && dattrs_equal(dattr_cur, i, dattr_new, j))
				goto match1;
		}
		/* no match - a current sched domain not in new doms_new[] */
		detach_destroy_domains(doms_cur[i]);
match1:
		;
	}

	n = ndoms_cur;
	if (doms_new == NULL) {
		n = 0;
		doms_new = &fallback_doms;
		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
		WARN_ON_ONCE(dattr_new);
	}

	/* Build new domains */
	for (i = 0; i < ndoms_new; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_new[i], doms_cur[j])
			    && dattrs_equal(dattr_new, i, dattr_cur, j))
				goto match2;
		}
		/* no match - add a new doms_new */
		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
match2:
		;
	}

	/* Remember the new sched domains */
	if (doms_cur != &fallback_doms)
		free_sched_domains(doms_cur, ndoms_cur);
	kfree(dattr_cur);	/* kfree(NULL) is safe */
	doms_cur = doms_new;
	dattr_cur = dattr_new;
	ndoms_cur = ndoms_new;

	register_sched_domain_sysctl();

	mutex_unlock(&sched_domains_mutex);
}

static int num_cpus_frozen;	/* used to mark begin/end of suspend/resume */

/*
 * Update cpusets according to cpu_active mask. If cpusets are
 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
 * around partition_sched_domains().
 *
 * If we come here as part of a suspend/resume, don't touch cpusets because we
 * want to restore it back to its original state upon resume anyway.
 */
static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	switch (action) {
	case CPU_ONLINE_FROZEN:
	case CPU_DOWN_FAILED_FROZEN:

		/*
		 * num_cpus_frozen tracks how many CPUs are involved in the
		 * suspend/resume sequence. As long as this is not the last
		 * online operation in the resume sequence, just build a
		 * single sched domain, ignoring cpusets.
		 */
		num_cpus_frozen--;
		if (likely(num_cpus_frozen)) {
			partition_sched_domains(1, NULL, NULL);
			break;
		}

		/*
		 * This is the last CPU online operation. So fall through and
		 * restore the original sched domains by considering the
		 * cpuset configurations.
		 */

	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		cpuset_update_active_cpus(true);
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
			       void *hcpu)
{
	switch (action) {
	case CPU_DOWN_PREPARE:
		cpuset_update_active_cpus(false);
		break;
	case CPU_DOWN_PREPARE_FROZEN:
		num_cpus_frozen++;
		partition_sched_domains(1, NULL, NULL);
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}


void __init sched_init_smp(void)
{
	cpumask_var_t non_isolated_cpus;

	walt_init_cpu_efficiency();
	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);

	sched_init_numa();

	/*
	 * There's no userspace yet to cause hotplug operations; hence all the
	 * cpu masks are stable and all blatant races in the below code cannot
	 * happen.
	 */
	mutex_lock(&sched_domains_mutex);
	init_sched_domains(cpu_active_mask);
	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
	if (cpumask_empty(non_isolated_cpus))
		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
	mutex_unlock(&sched_domains_mutex);

	hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);

	init_hrtick();

	/* Move init over to a non-isolated CPU */
	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
		BUG();
	sched_init_granularity();
	free_cpumask_var(non_isolated_cpus);

	init_sched_rt_class();
	init_sched_dl_class();
}
#else
void __init sched_init_smp(void)
{
	sched_init_granularity();
}
#endif /* CONFIG_SMP */


const_debug unsigned int sysctl_timer_migration = 1;

int in_sched_functions(unsigned long addr)
{
	return in_lock_functions(addr) ||
		(addr >= (unsigned long)__sched_text_start
		&& addr < (unsigned long)__sched_text_end);
}

#ifdef CONFIG_CGROUP_SCHED
/*
 * Default task group.
 * Every task in the system belongs to this group at bootup.
 */
struct task_group root_task_group;
LIST_HEAD(task_groups);

/* Cacheline aligned slab cache for task_group */
static struct kmem_cache *task_group_cache __read_mostly;
#endif

DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);

void __init sched_init(void)
{
	int i, j;
	unsigned long alloc_size = 0, ptr;

#ifdef CONFIG_FAIR_GROUP_SCHED
	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	alloc_size += num_possible_cpus() * cpumask_size();
#endif
	if (alloc_size) {
		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);

#ifdef CONFIG_FAIR_GROUP_SCHED
		root_task_group.se = (struct sched_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.rt_rq = (struct rt_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CPUMASK_OFFSTACK
		for_each_possible_cpu(i) {
			per_cpu(load_balance_mask, i) = (void *)ptr;
			ptr += cpumask_size();
		}
#endif /* CONFIG_CPUMASK_OFFSTACK */
	}

	init_rt_bandwidth(&def_rt_bandwidth,
			global_rt_period(), global_rt_runtime());
	init_dl_bandwidth(&def_dl_bandwidth,
			global_rt_period(), global_rt_runtime());

#ifdef CONFIG_SMP
	init_defrootdomain();
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	init_rt_bandwidth(&root_task_group.rt_bandwidth,
			global_rt_period(), global_rt_runtime());
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_CGROUP_SCHED
	task_group_cache = KMEM_CACHE(task_group, 0);

	list_add(&root_task_group.list, &task_groups);
	INIT_LIST_HEAD(&root_task_group.children);
	INIT_LIST_HEAD(&root_task_group.siblings);
	autogroup_init(&init_task);
#endif /* CONFIG_CGROUP_SCHED */

	for_each_possible_cpu(i) {
		struct rq *rq;

		rq = cpu_rq(i);
		raw_spin_lock_init(&rq->lock);
		rq->nr_running = 0;
		rq->calc_load_active = 0;
		rq->calc_load_update = jiffies + LOAD_FREQ;
		init_cfs_rq(&rq->cfs);
		init_rt_rq(&rq->rt, rq);
		init_dl_rq(&rq->dl, rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
		/*
		 * How much cpu bandwidth does root_task_group get?
		 *
		 * In case of task-groups formed through the cgroup filesystem,
		 * it gets 100% of the cpu resources in the system. This
		 * overall system cpu resource is divided among the tasks of
		 * root_task_group and its child task-groups in a fair manner,
		 * based on each entity's (task or task-group's) weight
		 * (se->load.weight).
		 *
		 * In other words, if root_task_group has 10 tasks of weight
		 * 1024 and two child groups A0 and A1 (of weight 1024 each),
		 * then A0's share of the cpu resource is:
		 *
		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
		 *
		 * We achieve this by letting root_task_group's tasks sit
		 * directly in rq->cfs (i.e. root_task_group->se[] = NULL).
		 */
		init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */

		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
#ifdef CONFIG_RT_GROUP_SCHED
		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif

		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
			rq->cpu_load[j] = 0;

		rq->last_load_update_tick = jiffies;

#ifdef CONFIG_SMP
		rq->sd = NULL;
		rq->rd = NULL;
		rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
		rq->post_schedule = 0;
		rq->active_balance = 0;
		rq->next_balance = jiffies;
		rq->push_cpu = 0;
		rq->cpu = i;
		rq->online = 0;
		rq->idle_stamp = 0;
		rq->avg_idle = 2*sysctl_sched_migration_cost;
		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
#ifdef CONFIG_SCHED_WALT
		rq->cur_irqload = 0;
		rq->avg_irqload = 0;
		rq->irqload_ts = 0;
#endif

		INIT_LIST_HEAD(&rq->cfs_tasks);

		rq_attach_root(rq, &def_root_domain);
#ifdef CONFIG_NO_HZ_COMMON
		rq->nohz_flags = 0;
#endif
#ifdef CONFIG_NO_HZ_FULL
		rq->last_sched_tick = 0;
#endif
#endif
		init_rq_hrtick(rq);
		atomic_set(&rq->nr_iowait, 0);
	}

	set_load_weight(&init_task);

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&init_task.preempt_notifiers);
#endif

	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	atomic_inc(&init_mm.mm_count);
	enter_lazy_tlb(&init_mm, current);

	/*
	 * During early bootup we pretend to be a normal task:
	 */
	current->sched_class = &fair_sched_class;

	/*
	 * Make us the idle thread. Technically, schedule() should not be
	 * called from this thread, however somewhere below it might be,
	 * but because we are the idle thread, we just pick up running again
	 * when this runqueue becomes "idle".
	 */
	init_idle(current, smp_processor_id());

	calc_load_update = jiffies + LOAD_FREQ;

#ifdef CONFIG_SMP
	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
	/* May be allocated at isolcpus cmdline parse time */
	if (cpu_isolated_map == NULL)
		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
	idle_thread_set_boot_cpu();
	set_cpu_rq_start_time();
#endif
	init_sched_fair_class();

	scheduler_running = 1;
}


#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
	int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();

	return (nested == preempt_offset);
}

static int __might_sleep_init_called;
int __init __might_sleep_init(void)
{
	__might_sleep_init_called = 1;
	return 0;
}
early_initcall(__might_sleep_init);

__might_sleep(const char * file,int line,int preempt_offset)7617 void __might_sleep(const char *file, int line, int preempt_offset)
7618 {
7619 static unsigned long prev_jiffy; /* ratelimiting */
7620
7621 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
7622 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
7623 !is_idle_task(current)) || oops_in_progress)
7624 return;
7625 if (system_state != SYSTEM_RUNNING &&
7626 (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
7627 return;
7628 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7629 return;
7630 prev_jiffy = jiffies;
7631
7632 printk(KERN_ERR
7633 "BUG: sleeping function called from invalid context at %s:%d\n",
7634 file, line);
7635 printk(KERN_ERR
7636 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
7637 in_atomic(), irqs_disabled(),
7638 current->pid, current->comm);
7639
7640 debug_show_held_locks(current);
7641 if (irqs_disabled())
7642 print_irqtrace_events(current);
7643 #ifdef CONFIG_DEBUG_PREEMPT
7644 if (!preempt_count_equals(preempt_offset)) {
7645 pr_err("Preemption disabled at:");
7646 print_ip_sym(current->preempt_disable_ip);
7647 pr_cont("\n");
7648 }
7649 #endif
7650 dump_stack();
7651 }
7652 EXPORT_SYMBOL(__might_sleep);
7653 #endif
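
/*
 * Illustrative sketch (not part of this file): __might_sleep() is normally
 * reached through the might_sleep() macro, which callers place at the top
 * of functions that may block, e.g.:
 *
 *	void *my_alloc_helper(size_t size)	(hypothetical helper)
 *	{
 *		might_sleep();
 *		return kmalloc(size, GFP_KERNEL);
 *	}
 *
 * Under CONFIG_DEBUG_ATOMIC_SLEEP, might_sleep() expands to
 * __might_sleep(__FILE__, __LINE__, 0). Calling such a function from
 * atomic context (preemption disabled, IRQs off, or inside an RCU
 * read-side section) triggers the rate-limited "BUG: sleeping function
 * called from invalid context" report above.
 */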

#ifdef CONFIG_MAGIC_SYSRQ
static void normalize_task(struct rq *rq, struct task_struct *p)
{
	const struct sched_class *prev_class = p->sched_class;
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
	};
	int old_prio = p->prio;
	int queued;

	queued = task_on_rq_queued(p);
	if (queued)
		dequeue_task(rq, p, 0);
	__setscheduler(rq, p, &attr, false);
	if (queued) {
		enqueue_task(rq, p, 0);
		resched_curr(rq);
	}

	check_class_changed(rq, p, prev_class, old_prio);
}

void normalize_rt_tasks(void)
{
	struct task_struct *g, *p;
	unsigned long flags;
	struct rq *rq;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/*
		 * Only normalize user tasks:
		 */
		if (p->flags & PF_KTHREAD)
			continue;

		p->se.exec_start = 0;
#ifdef CONFIG_SCHEDSTATS
		p->se.statistics.wait_start = 0;
		p->se.statistics.sleep_start = 0;
		p->se.statistics.block_start = 0;
#endif

		if (!dl_task(p) && !rt_task(p)) {
			/*
			 * Renice negative nice level userspace
			 * tasks back to 0:
			 */
			if (task_nice(p) < 0)
				set_user_nice(p, 0);
			continue;
		}

		rq = task_rq_lock(p, &flags);
		normalize_task(rq, p);
		task_rq_unlock(rq, p, &flags);
	}
	read_unlock(&tasklist_lock);
}

#endif /* CONFIG_MAGIC_SYSRQ */
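
/*
 * Usage note (a sketch, not defined in this file): with magic SysRq
 * enabled, normalize_rt_tasks() is what runs behind the SysRq 'n' key,
 * so a wedged system can demote runaway RT/deadline tasks from a console:
 *
 *	# echo n > /proc/sysrq-trigger
 *
 * after which user RT/deadline tasks are back to SCHED_NORMAL and
 * negative nice values have been reset to 0, per the loop above.
 */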

#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
/*
 * These functions are only useful for the IA64 MCA handling, or kdb.
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
 * activity can take place. Using them for anything else would
 * be a serious bug, and as a result, they aren't even visible
 * under any other configuration.
 */

/**
 * curr_task - return the current task for a given cpu.
 * @cpu: the processor in question.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 *
 * Return: The current task for @cpu.
 */
struct task_struct *curr_task(int cpu)
{
	return cpu_curr(cpu);
}

#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */

#ifdef CONFIG_IA64
/**
 * set_curr_task - set the current task for a given cpu.
 * @cpu: the processor in question.
 * @p: the task pointer to set.
 *
 * Description: This function must only be used when non-maskable interrupts
 * are serviced on a separate stack. It allows the architecture to switch the
 * notion of the current task on a cpu in a non-blocking manner. This function
 * must be called with all CPUs synchronized and interrupts disabled; the
 * caller must save the original value of the current task (see curr_task()
 * above) and restore that value before re-enabling interrupts and restarting
 * the system.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
void set_curr_task(int cpu, struct task_struct *p)
{
	cpu_curr(cpu) = p;
}

#endif

#ifdef CONFIG_CGROUP_SCHED
/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);

static void free_sched_group(struct task_group *tg)
{
	free_fair_sched_group(tg);
	free_rt_sched_group(tg);
	autogroup_free(tg);
	kmem_cache_free(task_group_cache, tg);
}

/* allocate runqueue etc for a new task group */
struct task_group *sched_create_group(struct task_group *parent)
{
	struct task_group *tg;

	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
	if (!tg)
		return ERR_PTR(-ENOMEM);

	if (!alloc_fair_sched_group(tg, parent))
		goto err;

	if (!alloc_rt_sched_group(tg, parent))
		goto err;

	return tg;

err:
	free_sched_group(tg);
	return ERR_PTR(-ENOMEM);
}

void sched_online_group(struct task_group *tg, struct task_group *parent)
{
	unsigned long flags;

	spin_lock_irqsave(&task_group_lock, flags);
	list_add_rcu(&tg->list, &task_groups);

	WARN_ON(!parent); /* root should already exist */

	tg->parent = parent;
	INIT_LIST_HEAD(&tg->children);
	list_add_rcu(&tg->siblings, &parent->children);
	spin_unlock_irqrestore(&task_group_lock, flags);
}
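
/*
 * Lifecycle sketch (drawn from the cgroup callbacks later in this file):
 * a task group goes through create -> online -> offline -> destroy:
 *
 *	tg = sched_create_group(parent);   allocate cfs/rt runqueues
 *	sched_online_group(tg, parent);    link into the task_groups list
 *	...
 *	sched_offline_group(tg);           end shares distribution
 *	sched_destroy_group(tg);           RCU-deferred free_sched_group()
 *
 * cpu_cgroup_css_alloc/online/offline/free below map one-to-one onto
 * these calls.
 */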

/* rcu callback to free various structures associated with a task group */
static void free_sched_group_rcu(struct rcu_head *rhp)
{
	/* now it should be safe to free those cfs_rqs */
	free_sched_group(container_of(rhp, struct task_group, rcu));
}

/* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg)
{
	/* wait for possible concurrent references to cfs_rqs to complete */
	call_rcu(&tg->rcu, free_sched_group_rcu);
}

void sched_offline_group(struct task_group *tg)
{
	unsigned long flags;
	int i;

	/* end participation in shares distribution */
	for_each_possible_cpu(i)
		unregister_fair_sched_group(tg, i);

	spin_lock_irqsave(&task_group_lock, flags);
	list_del_rcu(&tg->list);
	list_del_rcu(&tg->siblings);
	spin_unlock_irqrestore(&task_group_lock, flags);
}

/*
 * Change a task's runqueue when it moves between groups.
 * The caller of this function should have put the task in its new group
 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
 * reflect its new group.
 */
void sched_move_task(struct task_struct *tsk)
{
	struct task_group *tg;
	int queued, running;
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(tsk, &flags);

	running = task_current(rq, tsk);
	queued = task_on_rq_queued(tsk);

	if (queued)
		dequeue_task(rq, tsk, 0);
	if (unlikely(running))
		put_prev_task(rq, tsk);

	/*
	 * All callers are synchronized by task_rq_lock(); we do not use RCU
	 * which is pointless here. Thus, we pass "true" to task_css_check()
	 * to prevent lockdep warnings.
	 */
	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
			  struct task_group, css);
	tg = autogroup_task_group(tsk, tg);
	tsk->sched_task_group = tg;

#ifdef CONFIG_FAIR_GROUP_SCHED
	if (tsk->sched_class->task_move_group)
		tsk->sched_class->task_move_group(tsk);
	else
#endif
		set_task_rq(tsk, task_cpu(tsk));

	if (unlikely(running))
		tsk->sched_class->set_curr_task(rq);
	if (queued)
		enqueue_task(rq, tsk, 0);

	task_rq_unlock(rq, tsk, &flags);
}
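
/*
 * Illustrative call path (wired up in the cgroup glue at the bottom of
 * this file): moving a task between cpu cgroups lands here, e.g.
 *
 *	cpu_cgroup_attach(css, tset)
 *		cgroup_taskset_for_each(task, tset)
 *			sched_move_task(task);
 *
 * The dequeue/requeue bracket above ensures the task never runs while
 * its group pointers are half-updated.
 */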
#endif /* CONFIG_CGROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real time constraints are schedulable.
 */
static DEFINE_MUTEX(rt_constraints_mutex);

/* Must be called with tasklist_lock held */
static inline int tg_has_rt_tasks(struct task_group *tg)
{
	struct task_struct *g, *p;

	/*
	 * Autogroups do not have RT tasks; see autogroup_create().
	 */
	if (task_group_is_autogroup(tg))
		return 0;

	for_each_process_thread(g, p) {
		if (rt_task(p) && task_group(p) == tg)
			return 1;
	}

	return 0;
}

struct rt_schedulable_data {
	struct task_group *tg;
	u64 rt_period;
	u64 rt_runtime;
};

static int tg_rt_schedulable(struct task_group *tg, void *data)
{
	struct rt_schedulable_data *d = data;
	struct task_group *child;
	unsigned long total, sum = 0;
	u64 period, runtime;

	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	runtime = tg->rt_bandwidth.rt_runtime;

	if (tg == d->tg) {
		period = d->rt_period;
		runtime = d->rt_runtime;
	}

	/*
	 * Cannot have more runtime than the period.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	/*
	 * Ensure we don't starve existing RT tasks.
	 */
	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
		return -EBUSY;

	total = to_ratio(period, runtime);

	/*
	 * Nobody can have more than the global setting allows.
	 */
	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
		return -EINVAL;

	/*
	 * The sum of our children's runtime should not exceed our own.
	 */
	list_for_each_entry_rcu(child, &tg->children, siblings) {
		period = ktime_to_ns(child->rt_bandwidth.rt_period);
		runtime = child->rt_bandwidth.rt_runtime;

		if (child == d->tg) {
			period = d->rt_period;
			runtime = d->rt_runtime;
		}

		sum += to_ratio(period, runtime);
	}

	if (sum > total)
		return -EINVAL;

	return 0;
}
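
/*
 * Worked example (a sketch using the default globals): with
 * sched_rt_period_us = 1000000 and sched_rt_runtime_us = 950000, the
 * global ratio is to_ratio(1s, 950ms), i.e. 95% of CPU time. A group
 * asking for runtime = 500ms over period = 500ms (100%) fails the
 * "Nobody can have more than the global setting allows" check, while
 * 400ms over 500ms (80%) passes, provided its children's ratios sum to
 * no more than 80% as well.
 */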

static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
	int ret;

	struct rt_schedulable_data data = {
		.tg = tg,
		.rt_period = period,
		.rt_runtime = runtime,
	};

	rcu_read_lock();
	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
	rcu_read_unlock();

	return ret;
}

static int tg_set_rt_bandwidth(struct task_group *tg,
		u64 rt_period, u64 rt_runtime)
{
	int i, err = 0;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	err = __rt_schedulable(tg, rt_period, rt_runtime);
	if (err)
		goto unlock;

	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
	tg->rt_bandwidth.rt_runtime = rt_runtime;

	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = tg->rt_rq[i];

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_runtime;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return err;
}

static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
	u64 rt_runtime, rt_period;

	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
	if (rt_runtime_us < 0)
		rt_runtime = RUNTIME_INF;

	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

static long sched_group_rt_runtime(struct task_group *tg)
{
	u64 rt_runtime_us;

	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
		return -1;

	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
	do_div(rt_runtime_us, NSEC_PER_USEC);
	return rt_runtime_us;
}

static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
{
	u64 rt_runtime, rt_period;

	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
	rt_runtime = tg->rt_bandwidth.rt_runtime;

	if (rt_period == 0)
		return -EINVAL;

	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

static long sched_group_rt_period(struct task_group *tg)
{
	u64 rt_period_us;

	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
	do_div(rt_period_us, NSEC_PER_USEC);
	return rt_period_us;
}
#endif /* CONFIG_RT_GROUP_SCHED */
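
/*
 * Usage sketch: the setters above back the per-cgroup files registered in
 * cpu_files[] at the bottom of this file, so from userspace an RT budget
 * of 100ms per 1s for group "rtgrp" (path illustrative) looks like:
 *
 *	# echo 1000000 > /sys/fs/cgroup/cpu/rtgrp/cpu.rt_period_us
 *	# echo 100000  > /sys/fs/cgroup/cpu/rtgrp/cpu.rt_runtime_us
 *
 * Writing -1 to cpu.rt_runtime_us maps to RUNTIME_INF (no limit), per
 * sched_group_set_rt_runtime() above.
 */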

#ifdef CONFIG_RT_GROUP_SCHED
static int sched_rt_global_constraints(void)
{
	int ret = 0;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	ret = __rt_schedulable(NULL, 0, 0);
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return ret;
}

static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
	/* Don't accept realtime tasks when there is no way for them to run */
	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
		return 0;

	return 1;
}

#else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
	unsigned long flags;
	int i, ret = 0;

	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = &cpu_rq(i)->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = global_rt_runtime();
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

	return ret;
}
#endif /* CONFIG_RT_GROUP_SCHED */

static int sched_dl_global_constraints(void)
{
	u64 runtime = global_rt_runtime();
	u64 period = global_rt_period();
	u64 new_bw = to_ratio(period, runtime);
	struct dl_bw *dl_b;
	int cpu, ret = 0;
	unsigned long flags;

	/*
	 * Here we want to check that the bandwidth is not being set to a
	 * value smaller than the currently allocated bandwidth in
	 * any of the root_domains.
	 *
	 * FIXME: Cycling over all the CPUs is overkill, but simpler than
	 * cycling over root_domains... Discussion on different/better
	 * solutions is welcome!
	 */
	for_each_possible_cpu(cpu) {
		rcu_read_lock_sched();
		dl_b = dl_bw_of(cpu);

		raw_spin_lock_irqsave(&dl_b->lock, flags);
		if (new_bw < dl_b->total_bw)
			ret = -EBUSY;
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);

		rcu_read_unlock_sched();

		if (ret)
			break;
	}

	return ret;
}

static void sched_dl_do_global(void)
{
	u64 new_bw = -1;
	struct dl_bw *dl_b;
	int cpu;
	unsigned long flags;

	def_dl_bandwidth.dl_period = global_rt_period();
	def_dl_bandwidth.dl_runtime = global_rt_runtime();

	if (global_rt_runtime() != RUNTIME_INF)
		new_bw = to_ratio(global_rt_period(), global_rt_runtime());

	/*
	 * FIXME: As above...
	 */
	for_each_possible_cpu(cpu) {
		rcu_read_lock_sched();
		dl_b = dl_bw_of(cpu);

		raw_spin_lock_irqsave(&dl_b->lock, flags);
		dl_b->bw = new_bw;
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);

		rcu_read_unlock_sched();
	}
}

static int sched_rt_global_validate(void)
{
	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
		(sysctl_sched_rt_runtime > sysctl_sched_rt_period))
		return -EINVAL;

	return 0;
}

static void sched_rt_do_global(void)
{
	def_rt_bandwidth.rt_runtime = global_rt_runtime();
	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
}

int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_period, old_runtime;
	static DEFINE_MUTEX(mutex);
	int ret;

	mutex_lock(&mutex);
	old_period = sysctl_sched_rt_period;
	old_runtime = sysctl_sched_rt_runtime;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (!ret && write) {
		ret = sched_rt_global_validate();
		if (ret)
			goto undo;

		ret = sched_rt_global_constraints();
		if (ret)
			goto undo;

		ret = sched_dl_global_constraints();
		if (ret)
			goto undo;

		sched_rt_do_global();
		sched_dl_do_global();
	}
	if (0) {
undo:
		sysctl_sched_rt_period = old_period;
		sysctl_sched_rt_runtime = old_runtime;
	}
	mutex_unlock(&mutex);

	return ret;
}
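
/*
 * Usage sketch: sched_rt_handler() services the kernel.sched_rt_period_us
 * and kernel.sched_rt_runtime_us sysctls, so tightening the global RT
 * budget from userspace looks like:
 *
 *	# sysctl -w kernel.sched_rt_runtime_us=900000
 *
 * If any validate/constraint step rejects the new values, the old period
 * and runtime are restored via the if (0) { undo: } trampoline above.
 */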

int sched_rr_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	/*
	 * Make sure that internally we keep jiffies.
	 * Also, writing zero resets the timeslice to default.
	 */
	if (!ret && write) {
		sched_rr_timeslice = sched_rr_timeslice <= 0 ?
			RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
	}
	mutex_unlock(&mutex);
	return ret;
}
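
/*
 * Usage sketch: this handler backs the kernel.sched_rr_timeslice_ms
 * sysctl, e.g.
 *
 *	# sysctl -w kernel.sched_rr_timeslice_ms=50
 *
 * converts 50ms to jiffies internally; writing zero (or a negative
 * value) falls back to the RR_TIMESLICE default.
 */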

#ifdef CONFIG_CGROUP_SCHED

static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct task_group, css) : NULL;
}

static struct cgroup_subsys_state *
cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct task_group *parent = css_tg(parent_css);
	struct task_group *tg;

	if (!parent) {
		/* This is early initialization for the top cgroup */
		return &root_task_group.css;
	}

	tg = sched_create_group(parent);
	if (IS_ERR(tg))
		return ERR_PTR(-ENOMEM);

	return &tg->css;
}

static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);
	struct task_group *parent = css_tg(css->parent);

	if (parent)
		sched_online_group(tg, parent);
	return 0;
}

static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);

	sched_destroy_group(tg);
}

static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);

	sched_offline_group(tg);
}

static void cpu_cgroup_fork(struct task_struct *task)
{
	sched_move_task(task);
}

static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, tset) {
#ifdef CONFIG_RT_GROUP_SCHED
		if (!sched_rt_can_attach(css_tg(css), task))
			return -EINVAL;
#else
		/* We don't support RT-tasks being in separate groups */
		if (task->sched_class != &fair_sched_class)
			return -EINVAL;
#endif
	}
	return 0;
}

static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, tset)
		sched_move_task(task);
}

static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
			    struct cgroup_subsys_state *old_css,
			    struct task_struct *task)
{
	sched_move_task(task);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
				struct cftype *cftype, u64 shareval)
{
	return sched_group_set_shares(css_tg(css), scale_load(shareval));
}

static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct task_group *tg = css_tg(css);

	return (u64) scale_load_down(tg->shares);
}

#ifdef CONFIG_CFS_BANDWIDTH
static DEFINE_MUTEX(cfs_constraints_mutex);

const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);

static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
{
	int i, ret = 0, runtime_enabled, runtime_was_enabled;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	if (tg == &root_task_group)
		return -EINVAL;

	/*
	 * Ensure we have some amount of bandwidth every period. This is
	 * to prevent reaching a state of large arrears when throttled via
	 * entity_tick() resulting in prolonged exit starvation.
	 */
	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
		return -EINVAL;

	/*
	 * Likewise, bound things on the other side by preventing insane
	 * quota periods. This also allows us to normalize in computing
	 * quota feasibility.
	 */
	if (period > max_cfs_quota_period)
		return -EINVAL;

	/*
	 * Prevent race between setting of cfs_rq->runtime_enabled and
	 * unthrottle_offline_cfs_rqs().
	 */
	get_online_cpus();
	mutex_lock(&cfs_constraints_mutex);
	ret = __cfs_schedulable(tg, period, quota);
	if (ret)
		goto out_unlock;

	runtime_enabled = quota != RUNTIME_INF;
	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
	/*
	 * If we need to toggle cfs_bandwidth_used, off->on must occur
	 * before making related changes, and on->off must occur afterwards
	 */
	if (runtime_enabled && !runtime_was_enabled)
		cfs_bandwidth_usage_inc();
	raw_spin_lock_irq(&cfs_b->lock);
	cfs_b->period = ns_to_ktime(period);
	cfs_b->quota = quota;

	__refill_cfs_bandwidth_runtime(cfs_b);
	/* restart the period timer (if active) to handle new period expiry */
	if (runtime_enabled && cfs_b->timer_active) {
		/* force a reprogram */
		__start_cfs_bandwidth(cfs_b, true);
	}
	raw_spin_unlock_irq(&cfs_b->lock);

	for_each_online_cpu(i) {
		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
		struct rq *rq = cfs_rq->rq;

		raw_spin_lock_irq(&rq->lock);
		cfs_rq->runtime_enabled = runtime_enabled;
		cfs_rq->runtime_remaining = 0;

		if (cfs_rq->throttled)
			unthrottle_cfs_rq(cfs_rq);
		raw_spin_unlock_irq(&rq->lock);
	}
	if (runtime_was_enabled && !runtime_enabled)
		cfs_bandwidth_usage_dec();
out_unlock:
	mutex_unlock(&cfs_constraints_mutex);
	put_online_cpus();

	return ret;
}

int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
{
	u64 quota, period;

	period = ktime_to_ns(tg->cfs_bandwidth.period);
	if (cfs_quota_us < 0)
		quota = RUNTIME_INF;
	else
		quota = (u64)cfs_quota_us * NSEC_PER_USEC;

	return tg_set_cfs_bandwidth(tg, period, quota);
}

long tg_get_cfs_quota(struct task_group *tg)
{
	u64 quota_us;

	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
		return -1;

	quota_us = tg->cfs_bandwidth.quota;
	do_div(quota_us, NSEC_PER_USEC);

	return quota_us;
}

int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
{
	u64 quota, period;

	period = (u64)cfs_period_us * NSEC_PER_USEC;
	quota = tg->cfs_bandwidth.quota;

	return tg_set_cfs_bandwidth(tg, period, quota);
}

long tg_get_cfs_period(struct task_group *tg)
{
	u64 cfs_period_us;

	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
	do_div(cfs_period_us, NSEC_PER_USEC);

	return cfs_period_us;
}
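
/*
 * Usage sketch: these four helpers back cpu.cfs_quota_us and
 * cpu.cfs_period_us. Capping a group "g" (path illustrative) to half of
 * one CPU, i.e. 50ms of runtime per 100ms period:
 *
 *	# echo 100000 > /sys/fs/cgroup/cpu/g/cpu.cfs_period_us
 *	# echo 50000  > /sys/fs/cgroup/cpu/g/cpu.cfs_quota_us
 *
 * A quota of -1 maps to RUNTIME_INF (no cap); tg_set_cfs_bandwidth()
 * requires quota and period to be at least 1ms, and period at most 1s.
 */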

static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
				  struct cftype *cft)
{
	return tg_get_cfs_quota(css_tg(css));
}

static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
				   struct cftype *cftype, s64 cfs_quota_us)
{
	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
}

static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	return tg_get_cfs_period(css_tg(css));
}

static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
				    struct cftype *cftype, u64 cfs_period_us)
{
	return tg_set_cfs_period(css_tg(css), cfs_period_us);
}

struct cfs_schedulable_data {
	struct task_group *tg;
	u64 period, quota;
};

/*
 * normalize group quota/period to be quota/max_period
 * note: units are usecs
 */
static u64 normalize_cfs_quota(struct task_group *tg,
			       struct cfs_schedulable_data *d)
{
	u64 quota, period;

	if (tg == d->tg) {
		period = d->period;
		quota = d->quota;
	} else {
		period = tg_get_cfs_period(tg);
		quota = tg_get_cfs_quota(tg);
	}

	/* note: these should typically be equivalent */
	if (quota == RUNTIME_INF || quota == -1)
		return RUNTIME_INF;

	return to_ratio(period, quota);
}

static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
{
	struct cfs_schedulable_data *d = data;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
	s64 quota = 0, parent_quota = -1;

	if (!tg->parent) {
		quota = RUNTIME_INF;
	} else {
		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;

		quota = normalize_cfs_quota(tg, d);
		parent_quota = parent_b->hierarchical_quota;

		/*
		 * ensure max(child_quota) <= parent_quota, inherit when no
		 * limit is set
		 */
		if (quota == RUNTIME_INF)
			quota = parent_quota;
		else if (parent_quota != RUNTIME_INF && quota > parent_quota)
			return -EINVAL;
	}
	cfs_b->hierarchical_quota = quota;

	return 0;
}
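
/*
 * Worked example (a sketch): consider parent P with quota/period =
 * 40ms/100ms and child C asking for 60ms/100ms. Walking down, P's
 * normalized ratio is 0.4 and C's is 0.6; since 0.6 > 0.4 the walk
 * returns -EINVAL and the write that proposed the change is rejected.
 * Had C been left at quota == RUNTIME_INF, it would simply inherit
 * P's 0.4 as its hierarchical_quota.
 */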

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
{
	int ret;
	struct cfs_schedulable_data data = {
		.tg = tg,
		.period = period,
		.quota = quota,
	};

	if (quota != RUNTIME_INF) {
		do_div(data.period, NSEC_PER_USEC);
		do_div(data.quota, NSEC_PER_USEC);
	}

	rcu_read_lock();
	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
	rcu_read_unlock();

	return ret;
}

static int cpu_stats_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);

	return 0;
}
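
/*
 * Reading cpu.stat for a bandwidth-limited group therefore yields three
 * lines, e.g. (values illustrative):
 *
 *	nr_periods 1523
 *	nr_throttled 47
 *	throttled_time 182000000
 *
 * where throttled_time is in nanoseconds and counts how long the group's
 * cfs_rqs sat throttled.
 */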
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
				struct cftype *cft, s64 val)
{
	return sched_group_set_rt_runtime(css_tg(css), val);
}

static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	return sched_group_rt_runtime(css_tg(css));
}

static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
				    struct cftype *cftype, u64 rt_period_us)
{
	return sched_group_set_rt_period(css_tg(css), rt_period_us);
}

static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	return sched_group_rt_period(css_tg(css));
}
#endif /* CONFIG_RT_GROUP_SCHED */

static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "shares",
		.read_u64 = cpu_shares_read_u64,
		.write_u64 = cpu_shares_write_u64,
	},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.name = "cfs_quota_us",
		.read_s64 = cpu_cfs_quota_read_s64,
		.write_s64 = cpu_cfs_quota_write_s64,
	},
	{
		.name = "cfs_period_us",
		.read_u64 = cpu_cfs_period_read_u64,
		.write_u64 = cpu_cfs_period_write_u64,
	},
	{
		.name = "stat",
		.seq_show = cpu_stats_show,
	},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	{
		.name = "rt_runtime_us",
		.read_s64 = cpu_rt_runtime_read,
		.write_s64 = cpu_rt_runtime_write,
	},
	{
		.name = "rt_period_us",
		.read_u64 = cpu_rt_period_read_uint,
		.write_u64 = cpu_rt_period_write_uint,
	},
#endif
	{ }	/* terminate */
};

struct cgroup_subsys cpu_cgrp_subsys = {
	.css_alloc	= cpu_cgroup_css_alloc,
	.css_free	= cpu_cgroup_css_free,
	.css_online	= cpu_cgroup_css_online,
	.css_offline	= cpu_cgroup_css_offline,
	.fork		= cpu_cgroup_fork,
	.can_attach	= cpu_cgroup_can_attach,
	.attach		= cpu_cgroup_attach,
	.exit		= cpu_cgroup_exit,
	.legacy_cftypes	= cpu_files,
	.early_init	= 1,
};

#endif /* CONFIG_CGROUP_SCHED */
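
/*
 * Usage sketch: once the cpu controller is mounted, each entry in
 * cpu_files[] appears (with a "cpu." prefix) in every cgroup directory:
 *
 *	# mount -t cgroup -o cpu none /sys/fs/cgroup/cpu
 *	# mkdir /sys/fs/cgroup/cpu/g
 *	# ls /sys/fs/cgroup/cpu/g
 *	cpu.shares  cpu.cfs_quota_us  cpu.cfs_period_us  cpu.stat  ...
 *
 * The mount point and group name are illustrative.
 */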

void dump_cpu_task(int cpu)
{
	pr_info("Task dump for CPU %d:\n", cpu);
	sched_show_task(cpu_curr(cpu));
}