1 /*
2 * kernel/sched.c
3 *
4 * Kernel scheduler and related syscalls
5 *
6 * Copyright (C) 1991-2002 Linus Torvalds
7 *
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
11 * by Andrea Arcangeli
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
17 * 2003-09-03 Interactivity tuning by Con Kolivas.
18 * 2004-04-02 Scheduler domains code by Nick Piggin
19 * 2007-04-15 Work begun on replacing all interactivity tuning with a
20 * fair scheduling design by Con Kolivas.
21 * 2007-05-05 Load balancing (smp-nice) and other improvements
22 * by Peter Williams
23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
25 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 * Thomas Gleixner, Mike Kravetz
27 */
28
29 #include <linux/mm.h>
30 #include <linux/module.h>
31 #include <linux/nmi.h>
32 #include <linux/init.h>
33 #include <linux/uaccess.h>
34 #include <linux/highmem.h>
35 #include <linux/smp_lock.h>
36 #include <asm/mmu_context.h>
37 #include <linux/interrupt.h>
38 #include <linux/capability.h>
39 #include <linux/completion.h>
40 #include <linux/kernel_stat.h>
41 #include <linux/debug_locks.h>
42 #include <linux/security.h>
43 #include <linux/notifier.h>
44 #include <linux/profile.h>
45 #include <linux/freezer.h>
46 #include <linux/vmalloc.h>
47 #include <linux/blkdev.h>
48 #include <linux/delay.h>
49 #include <linux/pid_namespace.h>
50 #include <linux/smp.h>
51 #include <linux/threads.h>
52 #include <linux/timer.h>
53 #include <linux/rcupdate.h>
54 #include <linux/cpu.h>
55 #include <linux/cpuset.h>
56 #include <linux/percpu.h>
57 #include <linux/kthread.h>
58 #include <linux/proc_fs.h>
59 #include <linux/seq_file.h>
60 #include <linux/sysctl.h>
61 #include <linux/syscalls.h>
62 #include <linux/times.h>
63 #include <linux/tsacct_kern.h>
64 #include <linux/kprobes.h>
65 #include <linux/delayacct.h>
66 #include <linux/reciprocal_div.h>
67 #include <linux/unistd.h>
68 #include <linux/pagemap.h>
69 #include <linux/hrtimer.h>
70 #include <linux/tick.h>
71 #include <linux/bootmem.h>
72 #include <linux/debugfs.h>
73 #include <linux/ctype.h>
74 #include <linux/ftrace.h>
75 #include <trace/sched.h>
76
77 #include <asm/tlb.h>
78 #include <asm/irq_regs.h>
79
80 #include "sched_cpupri.h"
81
82 /*
83 * Convert user-nice values [ -20 ... 0 ... 19 ]
84 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
85 * and back.
86 */
87 #define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
88 #define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
89 #define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
90
91 /*
92 * 'User priority' is the nice value converted to something we
93 * can work with better when scaling various scheduler parameters;
94 * it's a [ 0 ... 39 ] range.
95 */
96 #define USER_PRIO(p) ((p)-MAX_RT_PRIO)
97 #define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
98 #define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
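/*
 * Worked example (illustrative only; assumes the usual MAX_RT_PRIO == 100,
 * MAX_PRIO == 140):
 *
 *	NICE_TO_PRIO(-20) == 100	USER_PRIO(100) == 0
 *	NICE_TO_PRIO(0)   == 120	USER_PRIO(120) == 20
 *	NICE_TO_PRIO(19)  == 139	USER_PRIO(139) == 39
 *
 * so the 40 nice levels map onto static priorities [100..139] and user
 * priorities [0..39], with MAX_USER_PRIO == 40.
 */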
99
100 /*
101 * Helpers for converting nanosecond timing to jiffy resolution
102 */
103 #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
104
105 #define NICE_0_LOAD SCHED_LOAD_SCALE
106 #define NICE_0_SHIFT SCHED_LOAD_SHIFT
107
108 /*
109 * These are the 'tuning knobs' of the scheduler:
110 *
111 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
112 * Timeslices get refilled after they expire.
113 */
114 #define DEF_TIMESLICE (100 * HZ / 1000)
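/*
 * DEF_TIMESLICE is expressed in jiffies, so the same 100 msecs works out to
 * a different count depending on HZ (a quick sanity check, not part of the
 * code): HZ=1000 -> 100 jiffies, HZ=250 -> 25, HZ=100 -> 10.
 */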
115
116 /*
117 * single value that denotes runtime == period, i.e., unlimited time.
118 */
119 #define RUNTIME_INF ((u64)~0ULL)
120
121 DEFINE_TRACE(sched_wait_task);
122 DEFINE_TRACE(sched_wakeup);
123 DEFINE_TRACE(sched_wakeup_new);
124 DEFINE_TRACE(sched_switch);
125 DEFINE_TRACE(sched_migrate_task);
126
127 #ifdef CONFIG_SMP
128
129 static void double_rq_lock(struct rq *rq1, struct rq *rq2);
130
131 /*
132 * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
133 * Since cpu_power is a 'constant', we can use a reciprocal divide.
134 */
135 static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
136 {
137 return reciprocal_divide(load, sg->reciprocal_cpu_power);
138 }
139
140 /*
141 * Each time a sched group cpu_power is changed,
142 * we must compute its reciprocal value
143 */
144 static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
145 {
146 sg->__cpu_power += val;
147 sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
148 }
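/*
 * Rough sketch of what the reciprocal divide buys us (example numbers only):
 * reciprocal_value(d) precomputes roughly 2^32 / d, and reciprocal_divide(a, r)
 * then evaluates ((u64)a * r) >> 32, turning the per-call division into a
 * multiply and a shift. E.g. with sg->__cpu_power == 2048,
 * reciprocal_cpu_power is ~2097152 and sg_div_cpu_power(sg, 4096) yields
 * (4096 * 2097152) >> 32 == 2.
 */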
149 #endif
150
151 static inline int rt_policy(int policy)
152 {
153 if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
154 return 1;
155 return 0;
156 }
157
158 static inline int task_has_rt_policy(struct task_struct *p)
159 {
160 return rt_policy(p->policy);
161 }
162
163 /*
164 * This is the priority-queue data structure of the RT scheduling class:
165 */
166 struct rt_prio_array {
167 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
168 struct list_head queue[MAX_RT_PRIO];
169 };
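/*
 * The bitmap + per-priority list layout is what makes RT task selection O(1).
 * A minimal sketch of the lookup side (the real code lives in sched_rt.c):
 *
 *	int idx = sched_find_first_bit(array->bitmap);
 *	struct sched_rt_entity *rt_se =
 *		list_entry(array->queue[idx].next, struct sched_rt_entity, run_list);
 *
 * The extra delimiter bit at index MAX_RT_PRIO is kept set so the bit search
 * always terminates, even when no RT task is queued.
 */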
170
171 struct rt_bandwidth {
172 /* nests inside the rq lock: */
173 spinlock_t rt_runtime_lock;
174 ktime_t rt_period;
175 u64 rt_runtime;
176 struct hrtimer rt_period_timer;
177 };
178
179 static struct rt_bandwidth def_rt_bandwidth;
180
181 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
182
183 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
184 {
185 struct rt_bandwidth *rt_b =
186 container_of(timer, struct rt_bandwidth, rt_period_timer);
187 ktime_t now;
188 int overrun;
189 int idle = 0;
190
191 for (;;) {
192 now = hrtimer_cb_get_time(timer);
193 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
194
195 if (!overrun)
196 break;
197
198 idle = do_sched_rt_period_timer(rt_b, overrun);
199 }
200
201 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
202 }
203
204 static
205 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
206 {
207 rt_b->rt_period = ns_to_ktime(period);
208 rt_b->rt_runtime = runtime;
209
210 spin_lock_init(&rt_b->rt_runtime_lock);
211
212 hrtimer_init(&rt_b->rt_period_timer,
213 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
214 rt_b->rt_period_timer.function = sched_rt_period_timer;
215 }
216
217 static inline int rt_bandwidth_enabled(void)
218 {
219 return sysctl_sched_rt_runtime >= 0;
220 }
221
222 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
223 {
224 ktime_t now;
225
226 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
227 return;
228
229 if (hrtimer_active(&rt_b->rt_period_timer))
230 return;
231
232 spin_lock(&rt_b->rt_runtime_lock);
233 for (;;) {
234 if (hrtimer_active(&rt_b->rt_period_timer))
235 break;
236
237 now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
238 hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
239 hrtimer_start_expires(&rt_b->rt_period_timer,
240 HRTIMER_MODE_ABS);
241 }
242 spin_unlock(&rt_b->rt_runtime_lock);
243 }
244
245 #ifdef CONFIG_RT_GROUP_SCHED
246 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
247 {
248 hrtimer_cancel(&rt_b->rt_period_timer);
249 }
250 #endif
251
252 /*
253 * sched_domains_mutex serializes calls to arch_init_sched_domains,
254 * detach_destroy_domains and partition_sched_domains.
255 */
256 static DEFINE_MUTEX(sched_domains_mutex);
257
258 #ifdef CONFIG_GROUP_SCHED
259
260 #include <linux/cgroup.h>
261
262 struct cfs_rq;
263
264 static LIST_HEAD(task_groups);
265
266 /* task group related information */
267 struct task_group {
268 #ifdef CONFIG_CGROUP_SCHED
269 struct cgroup_subsys_state css;
270 #endif
271
272 #ifdef CONFIG_USER_SCHED
273 uid_t uid;
274 #endif
275
276 #ifdef CONFIG_FAIR_GROUP_SCHED
277 /* schedulable entities of this group on each cpu */
278 struct sched_entity **se;
279 /* runqueue "owned" by this group on each cpu */
280 struct cfs_rq **cfs_rq;
281 unsigned long shares;
282 #endif
283
284 #ifdef CONFIG_RT_GROUP_SCHED
285 struct sched_rt_entity **rt_se;
286 struct rt_rq **rt_rq;
287
288 struct rt_bandwidth rt_bandwidth;
289 #endif
290
291 struct rcu_head rcu;
292 struct list_head list;
293
294 struct task_group *parent;
295 struct list_head siblings;
296 struct list_head children;
297 };
298
299 #ifdef CONFIG_USER_SCHED
300
301 /* Helper function to pass uid information to create_sched_user() */
302 void set_tg_uid(struct user_struct *user)
303 {
304 user->tg->uid = user->uid;
305 }
306
307 /*
308 * Root task group.
309 * Every UID task group (including init_task_group aka UID-0) will
310 * be a child to this group.
311 */
312 struct task_group root_task_group;
313
314 #ifdef CONFIG_FAIR_GROUP_SCHED
315 /* Default task group's sched entity on each cpu */
316 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
317 /* Default task group's cfs_rq on each cpu */
318 static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
319 #endif /* CONFIG_FAIR_GROUP_SCHED */
320
321 #ifdef CONFIG_RT_GROUP_SCHED
322 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
323 static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
324 #endif /* CONFIG_RT_GROUP_SCHED */
325 #else /* !CONFIG_USER_SCHED */
326 #define root_task_group init_task_group
327 #endif /* CONFIG_USER_SCHED */
328
329 /* task_group_lock serializes add/remove of task groups and also changes to
330 * a task group's cpu shares.
331 */
332 static DEFINE_SPINLOCK(task_group_lock);
333
334 #ifdef CONFIG_FAIR_GROUP_SCHED
335 #ifdef CONFIG_USER_SCHED
336 # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
337 #else /* !CONFIG_USER_SCHED */
338 # define INIT_TASK_GROUP_LOAD NICE_0_LOAD
339 #endif /* CONFIG_USER_SCHED */
340
341 /*
342 * A weight of 0 or 1 can cause arithmetics problems.
343 * The weight of a cfs_rq is the sum of the weights of the entities
344 * queued on it, so the weight of an entity should not be too large,
345 * and neither should the shares value of a task group.
346 * (The default weight is 1024 - so there's no practical
347 * limitation from this.)
348 */
349 #define MIN_SHARES 2
350 #define MAX_SHARES (1UL << 18)
351
352 static int init_task_group_load = INIT_TASK_GROUP_LOAD;
353 #endif
354
355 /* Default task group.
356 * Every task in the system belongs to this group at bootup.
357 */
358 struct task_group init_task_group;
359
360 /* return group to which a task belongs */
361 static inline struct task_group *task_group(struct task_struct *p)
362 {
363 struct task_group *tg;
364
365 #ifdef CONFIG_USER_SCHED
366 rcu_read_lock();
367 tg = __task_cred(p)->user->tg;
368 rcu_read_unlock();
369 #elif defined(CONFIG_CGROUP_SCHED)
370 tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
371 struct task_group, css);
372 #else
373 tg = &init_task_group;
374 #endif
375 return tg;
376 }
377
378 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
379 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
380 {
381 #ifdef CONFIG_FAIR_GROUP_SCHED
382 p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
383 p->se.parent = task_group(p)->se[cpu];
384 #endif
385
386 #ifdef CONFIG_RT_GROUP_SCHED
387 p->rt.rt_rq = task_group(p)->rt_rq[cpu];
388 p->rt.parent = task_group(p)->rt_se[cpu];
389 #endif
390 }
391
392 #else
393
394 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
395 static inline struct task_group *task_group(struct task_struct *p)
396 {
397 return NULL;
398 }
399
400 #endif /* CONFIG_GROUP_SCHED */
401
402 /* CFS-related fields in a runqueue */
403 struct cfs_rq {
404 struct load_weight load;
405 unsigned long nr_running;
406
407 u64 exec_clock;
408 u64 min_vruntime;
409
410 struct rb_root tasks_timeline;
411 struct rb_node *rb_leftmost;
412
413 struct list_head tasks;
414 struct list_head *balance_iterator;
415
416 /*
417 * 'curr' points to currently running entity on this cfs_rq.
418 * It is set to NULL otherwise (i.e., when none are currently running).
419 */
420 struct sched_entity *curr, *next, *last;
421
422 unsigned int nr_spread_over;
423
424 #ifdef CONFIG_FAIR_GROUP_SCHED
425 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
426
427 /*
428 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
429 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
430 * (like users, containers etc.)
431 *
432 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
433 * list is used during load balance.
434 */
435 struct list_head leaf_cfs_rq_list;
436 struct task_group *tg; /* group that "owns" this runqueue */
437
438 #ifdef CONFIG_SMP
439 /*
440 * the part of load.weight contributed by tasks
441 */
442 unsigned long task_weight;
443
444 /*
445 * h_load = weight * f(tg)
446 *
447 * Where f(tg) is the recursive weight fraction assigned to
448 * this group.
449 */
450 unsigned long h_load;
451
452 /*
453 * this cpu's part of tg->shares
454 */
455 unsigned long shares;
456
457 /*
458 * load.weight at the time we set shares
459 */
460 unsigned long rq_weight;
461 #endif
462 #endif
463 };
464
465 /* Real-Time classes' related field in a runqueue: */
466 struct rt_rq {
467 struct rt_prio_array active;
468 unsigned long rt_nr_running;
469 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
470 int highest_prio; /* highest queued rt task prio */
471 #endif
472 #ifdef CONFIG_SMP
473 unsigned long rt_nr_migratory;
474 int overloaded;
475 #endif
476 int rt_throttled;
477 u64 rt_time;
478 u64 rt_runtime;
479 /* Nests inside the rq lock: */
480 spinlock_t rt_runtime_lock;
481
482 #ifdef CONFIG_RT_GROUP_SCHED
483 unsigned long rt_nr_boosted;
484
485 struct rq *rq;
486 struct list_head leaf_rt_rq_list;
487 struct task_group *tg;
488 struct sched_rt_entity *rt_se;
489 #endif
490 };
491
492 #ifdef CONFIG_SMP
493
494 /*
495 * We add the notion of a root-domain which will be used to define per-domain
496 * variables. Each exclusive cpuset essentially defines an island domain by
497 * fully partitioning the member cpus from any other cpuset. Whenever a new
498 * exclusive cpuset is created, we also create and attach a new root-domain
499 * object.
500 *
501 */
502 struct root_domain {
503 atomic_t refcount;
504 cpumask_var_t span;
505 cpumask_var_t online;
506
507 /*
508 * The "RT overload" flag: it gets set if a CPU has more than
509 * one runnable RT task.
510 */
511 cpumask_var_t rto_mask;
512 atomic_t rto_count;
513 #ifdef CONFIG_SMP
514 struct cpupri cpupri;
515 #endif
516 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
517 /*
518 * Preferred wake up cpu nominated by sched_mc balance that will be
519 * used when most cpus are idle in the system indicating overall very
520 * low system utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP(2)
521 */
522 unsigned int sched_mc_preferred_wakeup_cpu;
523 #endif
524 };
525
526 /*
527 * By default the system creates a single root-domain with all cpus as
528 * members (mimicking the global state we have today).
529 */
530 static struct root_domain def_root_domain;
531
532 #endif
533
534 /*
535 * This is the main, per-CPU runqueue data structure.
536 *
537 * Locking rule: those places that want to lock multiple runqueues
538 * (such as the load balancing or the thread migration code), lock
539 * acquire operations must be ordered by ascending &runqueue.
540 */
541 struct rq {
542 /* runqueue lock: */
543 spinlock_t lock;
544
545 /*
546 * nr_running and cpu_load should be in the same cacheline because
547 * remote CPUs use both these fields when doing load calculation.
548 */
549 unsigned long nr_running;
550 #define CPU_LOAD_IDX_MAX 5
551 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
552 unsigned char idle_at_tick;
553 #ifdef CONFIG_NO_HZ
554 unsigned long last_tick_seen;
555 unsigned char in_nohz_recently;
556 #endif
557 /* capture load from *all* tasks on this cpu: */
558 struct load_weight load;
559 unsigned long nr_load_updates;
560 u64 nr_switches;
561
562 struct cfs_rq cfs;
563 struct rt_rq rt;
564
565 #ifdef CONFIG_FAIR_GROUP_SCHED
566 /* list of leaf cfs_rq on this cpu: */
567 struct list_head leaf_cfs_rq_list;
568 #endif
569 #ifdef CONFIG_RT_GROUP_SCHED
570 struct list_head leaf_rt_rq_list;
571 #endif
572
573 /*
574 * This is part of a global counter where only the total sum
575 * over all CPUs matters. A task can increase this counter on
576 * one CPU and if it got migrated afterwards it may decrease
577 * it on another CPU. Always updated under the runqueue lock:
578 */
579 unsigned long nr_uninterruptible;
580
581 struct task_struct *curr, *idle;
582 unsigned long next_balance;
583 struct mm_struct *prev_mm;
584
585 u64 clock;
586
587 atomic_t nr_iowait;
588
589 #ifdef CONFIG_SMP
590 struct root_domain *rd;
591 struct sched_domain *sd;
592
593 /* For active balancing */
594 int active_balance;
595 int push_cpu;
596 /* cpu of this runqueue: */
597 int cpu;
598 int online;
599
600 unsigned long avg_load_per_task;
601
602 struct task_struct *migration_thread;
603 struct list_head migration_queue;
604 #endif
605
606 #ifdef CONFIG_SCHED_HRTICK
607 #ifdef CONFIG_SMP
608 int hrtick_csd_pending;
609 struct call_single_data hrtick_csd;
610 #endif
611 struct hrtimer hrtick_timer;
612 #endif
613
614 #ifdef CONFIG_SCHEDSTATS
615 /* latency stats */
616 struct sched_info rq_sched_info;
617 unsigned long long rq_cpu_time;
618 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
619
620 /* sys_sched_yield() stats */
621 unsigned int yld_exp_empty;
622 unsigned int yld_act_empty;
623 unsigned int yld_both_empty;
624 unsigned int yld_count;
625
626 /* schedule() stats */
627 unsigned int sched_switch;
628 unsigned int sched_count;
629 unsigned int sched_goidle;
630
631 /* try_to_wake_up() stats */
632 unsigned int ttwu_count;
633 unsigned int ttwu_local;
634
635 /* BKL stats */
636 unsigned int bkl_count;
637 #endif
638 };
639
640 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
641
642 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p, int sync)
643 {
644 rq->curr->sched_class->check_preempt_curr(rq, p, sync);
645 }
646
647 static inline int cpu_of(struct rq *rq)
648 {
649 #ifdef CONFIG_SMP
650 return rq->cpu;
651 #else
652 return 0;
653 #endif
654 }
655
656 /*
657 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
658 * See detach_destroy_domains: synchronize_sched for details.
659 *
660 * The domain tree of any CPU may only be accessed from within
661 * preempt-disabled sections.
662 */
663 #define for_each_domain(cpu, __sd) \
664 for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
665
666 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
667 #define this_rq() (&__get_cpu_var(runqueues))
668 #define task_rq(p) cpu_rq(task_cpu(p))
669 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
670
671 static inline void update_rq_clock(struct rq *rq)
672 {
673 rq->clock = sched_clock_cpu(cpu_of(rq));
674 }
675
676 /*
677 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
678 */
679 #ifdef CONFIG_SCHED_DEBUG
680 # define const_debug __read_mostly
681 #else
682 # define const_debug static const
683 #endif
684
685 /**
686 * runqueue_is_locked
687 *
688 * Returns true if the current cpu runqueue is locked.
689 * This interface allows printk to be called with the runqueue lock
690 * held and know whether or not it is OK to wake up the klogd.
691 */
692 int runqueue_is_locked(void)
693 {
694 int cpu = get_cpu();
695 struct rq *rq = cpu_rq(cpu);
696 int ret;
697
698 ret = spin_is_locked(&rq->lock);
699 put_cpu();
700 return ret;
701 }
702
703 /*
704 * Debugging: various feature bits
705 */
706
707 #define SCHED_FEAT(name, enabled) \
708 __SCHED_FEAT_##name ,
709
710 enum {
711 #include "sched_features.h"
712 };
713
714 #undef SCHED_FEAT
715
716 #define SCHED_FEAT(name, enabled) \
717 (1UL << __SCHED_FEAT_##name) * enabled |
718
719 const_debug unsigned int sysctl_sched_features =
720 #include "sched_features.h"
721 0;
722
723 #undef SCHED_FEAT
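/*
 * sched_features.h is included three times with a different SCHED_FEAT()
 * definition each time (a classic X-macro). As a sketch, a hypothetical
 * entry such as SCHED_FEAT(HRTICK, 1) would expand to:
 *
 *	__SCHED_FEAT_HRTICK ,			(enum bit index, above)
 *	(1UL << __SCHED_FEAT_HRTICK) * 1 |	(default bitmask, above)
 *	"HRTICK" ,				(name table, debug only, below)
 *
 * and sched_feat(HRTICK) then simply tests that bit in sysctl_sched_features.
 */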
724
725 #ifdef CONFIG_SCHED_DEBUG
726 #define SCHED_FEAT(name, enabled) \
727 #name ,
728
729 static __read_mostly char *sched_feat_names[] = {
730 #include "sched_features.h"
731 NULL
732 };
733
734 #undef SCHED_FEAT
735
736 static int sched_feat_show(struct seq_file *m, void *v)
737 {
738 int i;
739
740 for (i = 0; sched_feat_names[i]; i++) {
741 if (!(sysctl_sched_features & (1UL << i)))
742 seq_puts(m, "NO_");
743 seq_printf(m, "%s ", sched_feat_names[i]);
744 }
745 seq_puts(m, "\n");
746
747 return 0;
748 }
749
750 static ssize_t
751 sched_feat_write(struct file *filp, const char __user *ubuf,
752 size_t cnt, loff_t *ppos)
753 {
754 char buf[64];
755 char *cmp = buf;
756 int neg = 0;
757 int i;
758
759 if (cnt > 63)
760 cnt = 63;
761
762 if (copy_from_user(&buf, ubuf, cnt))
763 return -EFAULT;
764
765 buf[cnt] = 0;
766
767 if (strncmp(buf, "NO_", 3) == 0) {
768 neg = 1;
769 cmp += 3;
770 }
771
772 for (i = 0; sched_feat_names[i]; i++) {
773 int len = strlen(sched_feat_names[i]);
774
775 if (strncmp(cmp, sched_feat_names[i], len) == 0) {
776 if (neg)
777 sysctl_sched_features &= ~(1UL << i);
778 else
779 sysctl_sched_features |= (1UL << i);
780 break;
781 }
782 }
783
784 if (!sched_feat_names[i])
785 return -EINVAL;
786
787 filp->f_pos += cnt;
788
789 return cnt;
790 }
791
792 static int sched_feat_open(struct inode *inode, struct file *filp)
793 {
794 return single_open(filp, sched_feat_show, NULL);
795 }
796
797 static struct file_operations sched_feat_fops = {
798 .open = sched_feat_open,
799 .write = sched_feat_write,
800 .read = seq_read,
801 .llseek = seq_lseek,
802 .release = single_release,
803 };
804
805 static __init int sched_init_debug(void)
806 {
807 debugfs_create_file("sched_features", 0644, NULL, NULL,
808 &sched_feat_fops);
809
810 return 0;
811 }
812 late_initcall(sched_init_debug);
813
814 #endif
815
816 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
817
818 /*
819 * Number of tasks to iterate in a single balance run.
820 * Limited because this is done with IRQs disabled.
821 */
822 const_debug unsigned int sysctl_sched_nr_migrate = 32;
823
824 /*
825 * ratelimit for updating the group shares.
826 * default: 0.25ms
827 */
828 unsigned int sysctl_sched_shares_ratelimit = 250000;
829
830 /*
831 * Inject some fuzzyness into changing the per-cpu group shares
832 * this avoids remote rq-locks at the expense of fairness.
833 * default: 4
834 */
835 unsigned int sysctl_sched_shares_thresh = 4;
836
837 /*
838 * period over which we measure -rt task cpu usage in us.
839 * default: 1s
840 */
841 unsigned int sysctl_sched_rt_period = 1000000;
842
843 static __read_mostly int scheduler_running;
844
845 /*
846 * part of the period that we allow rt tasks to run in us.
847 * default: 0.95s
848 */
849 int sysctl_sched_rt_runtime = 950000;
850
851 static inline u64 global_rt_period(void)
852 {
853 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
854 }
855
856 static inline u64 global_rt_runtime(void)
857 {
858 if (sysctl_sched_rt_runtime < 0)
859 return RUNTIME_INF;
860
861 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
862 }
863
864 #ifndef prepare_arch_switch
865 # define prepare_arch_switch(next) do { } while (0)
866 #endif
867 #ifndef finish_arch_switch
868 # define finish_arch_switch(prev) do { } while (0)
869 #endif
870
871 static inline int task_current(struct rq *rq, struct task_struct *p)
872 {
873 return rq->curr == p;
874 }
875
876 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
877 static inline int task_running(struct rq *rq, struct task_struct *p)
878 {
879 return task_current(rq, p);
880 }
881
882 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
883 {
884 }
885
886 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
887 {
888 #ifdef CONFIG_DEBUG_SPINLOCK
889 /* this is a valid case when another task releases the spinlock */
890 rq->lock.owner = current;
891 #endif
892 /*
893 * If we are tracking spinlock dependencies then we have to
894 * fix up the runqueue lock - which gets 'carried over' from
895 * prev into current:
896 */
897 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
898
899 spin_unlock_irq(&rq->lock);
900 }
901
902 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
903 static inline int task_running(struct rq *rq, struct task_struct *p)
904 {
905 #ifdef CONFIG_SMP
906 return p->oncpu;
907 #else
908 return task_current(rq, p);
909 #endif
910 }
911
912 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
913 {
914 #ifdef CONFIG_SMP
915 /*
916 * We can optimise this out completely for !SMP, because the
917 * SMP rebalancing from interrupt is the only thing that cares
918 * here.
919 */
920 next->oncpu = 1;
921 #endif
922 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
923 spin_unlock_irq(&rq->lock);
924 #else
925 spin_unlock(&rq->lock);
926 #endif
927 }
928
929 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
930 {
931 #ifdef CONFIG_SMP
932 /*
933 * After ->oncpu is cleared, the task can be moved to a different CPU.
934 * We must ensure this doesn't happen until the switch is completely
935 * finished.
936 */
937 smp_wmb();
938 prev->oncpu = 0;
939 #endif
940 #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
941 local_irq_enable();
942 #endif
943 }
944 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
945
946 /*
947 * __task_rq_lock - lock the runqueue a given task resides on.
948 * Must be called with interrupts disabled.
949 */
950 static inline struct rq *__task_rq_lock(struct task_struct *p)
951 __acquires(rq->lock)
952 {
953 for (;;) {
954 struct rq *rq = task_rq(p);
955 spin_lock(&rq->lock);
956 if (likely(rq == task_rq(p)))
957 return rq;
958 spin_unlock(&rq->lock);
959 }
960 }
961
962 /*
963 * task_rq_lock - lock the runqueue a given task resides on and disable
964 * interrupts. Note the ordering: we can safely lookup the task_rq without
965 * explicitly disabling preemption.
966 */
967 static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
968 __acquires(rq->lock)
969 {
970 struct rq *rq;
971
972 for (;;) {
973 local_irq_save(*flags);
974 rq = task_rq(p);
975 spin_lock(&rq->lock);
976 if (likely(rq == task_rq(p)))
977 return rq;
978 spin_unlock_irqrestore(&rq->lock, *flags);
979 }
980 }
981
982 void task_rq_unlock_wait(struct task_struct *p)
983 {
984 struct rq *rq = task_rq(p);
985
986 smp_mb(); /* spin-unlock-wait is not a full memory barrier */
987 spin_unlock_wait(&rq->lock);
988 }
989
990 static void __task_rq_unlock(struct rq *rq)
991 __releases(rq->lock)
992 {
993 spin_unlock(&rq->lock);
994 }
995
996 static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
997 __releases(rq->lock)
998 {
999 spin_unlock_irqrestore(&rq->lock, *flags);
1000 }
1001
1002 /*
1003 * this_rq_lock - lock this runqueue and disable interrupts.
1004 */
1005 static struct rq *this_rq_lock(void)
1006 __acquires(rq->lock)
1007 {
1008 struct rq *rq;
1009
1010 local_irq_disable();
1011 rq = this_rq();
1012 spin_lock(&rq->lock);
1013
1014 return rq;
1015 }
1016
1017 #ifdef CONFIG_SCHED_HRTICK
1018 /*
1019 * Use HR-timers to deliver accurate preemption points.
1020 *
1021 * It's all a bit involved since we cannot program an hrtimer while holding
1022 * the rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
1023 * reschedule event.
1024 *
1025 * When we get rescheduled we reprogram the hrtick_timer outside of the
1026 * rq->lock.
1027 */
1028
1029 /*
1030 * Use hrtick when:
1031 * - enabled by features
1032 * - hrtimer is actually high res
1033 */
1034 static inline int hrtick_enabled(struct rq *rq)
1035 {
1036 if (!sched_feat(HRTICK))
1037 return 0;
1038 if (!cpu_active(cpu_of(rq)))
1039 return 0;
1040 return hrtimer_is_hres_active(&rq->hrtick_timer);
1041 }
1042
1043 static void hrtick_clear(struct rq *rq)
1044 {
1045 if (hrtimer_active(&rq->hrtick_timer))
1046 hrtimer_cancel(&rq->hrtick_timer);
1047 }
1048
1049 /*
1050 * High-resolution timer tick.
1051 * Runs from hardirq context with interrupts disabled.
1052 */
1053 static enum hrtimer_restart hrtick(struct hrtimer *timer)
1054 {
1055 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
1056
1057 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1058
1059 spin_lock(&rq->lock);
1060 update_rq_clock(rq);
1061 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
1062 spin_unlock(&rq->lock);
1063
1064 return HRTIMER_NORESTART;
1065 }
1066
1067 #ifdef CONFIG_SMP
1068 /*
1069 * called from hardirq (IPI) context
1070 */
1071 static void __hrtick_start(void *arg)
1072 {
1073 struct rq *rq = arg;
1074
1075 spin_lock(&rq->lock);
1076 hrtimer_restart(&rq->hrtick_timer);
1077 rq->hrtick_csd_pending = 0;
1078 spin_unlock(&rq->lock);
1079 }
1080
1081 /*
1082 * Called to set the hrtick timer state.
1083 *
1084 * called with rq->lock held and irqs disabled
1085 */
1086 static void hrtick_start(struct rq *rq, u64 delay)
1087 {
1088 struct hrtimer *timer = &rq->hrtick_timer;
1089 ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
1090
1091 hrtimer_set_expires(timer, time);
1092
1093 if (rq == this_rq()) {
1094 hrtimer_restart(timer);
1095 } else if (!rq->hrtick_csd_pending) {
1096 __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd);
1097 rq->hrtick_csd_pending = 1;
1098 }
1099 }
1100
1101 static int
1102 hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
1103 {
1104 int cpu = (int)(long)hcpu;
1105
1106 switch (action) {
1107 case CPU_UP_CANCELED:
1108 case CPU_UP_CANCELED_FROZEN:
1109 case CPU_DOWN_PREPARE:
1110 case CPU_DOWN_PREPARE_FROZEN:
1111 case CPU_DEAD:
1112 case CPU_DEAD_FROZEN:
1113 hrtick_clear(cpu_rq(cpu));
1114 return NOTIFY_OK;
1115 }
1116
1117 return NOTIFY_DONE;
1118 }
1119
1120 static __init void init_hrtick(void)
1121 {
1122 hotcpu_notifier(hotplug_hrtick, 0);
1123 }
1124 #else
1125 /*
1126 * Called to set the hrtick timer state.
1127 *
1128 * called with rq->lock held and irqs disabled
1129 */
1130 static void hrtick_start(struct rq *rq, u64 delay)
1131 {
1132 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL);
1133 }
1134
1135 static inline void init_hrtick(void)
1136 {
1137 }
1138 #endif /* CONFIG_SMP */
1139
1140 static void init_rq_hrtick(struct rq *rq)
1141 {
1142 #ifdef CONFIG_SMP
1143 rq->hrtick_csd_pending = 0;
1144
1145 rq->hrtick_csd.flags = 0;
1146 rq->hrtick_csd.func = __hrtick_start;
1147 rq->hrtick_csd.info = rq;
1148 #endif
1149
1150 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1151 rq->hrtick_timer.function = hrtick;
1152 }
1153 #else /* CONFIG_SCHED_HRTICK */
1154 static inline void hrtick_clear(struct rq *rq)
1155 {
1156 }
1157
1158 static inline void init_rq_hrtick(struct rq *rq)
1159 {
1160 }
1161
1162 static inline void init_hrtick(void)
1163 {
1164 }
1165 #endif /* CONFIG_SCHED_HRTICK */
1166
1167 /*
1168 * resched_task - mark a task 'to be rescheduled now'.
1169 *
1170 * On UP this means the setting of the need_resched flag, on SMP it
1171 * might also involve a cross-CPU call to trigger the scheduler on
1172 * the target CPU.
1173 */
1174 #ifdef CONFIG_SMP
1175
1176 #ifndef tsk_is_polling
1177 #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
1178 #endif
1179
1180 static void resched_task(struct task_struct *p)
1181 {
1182 int cpu;
1183
1184 assert_spin_locked(&task_rq(p)->lock);
1185
1186 if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
1187 return;
1188
1189 set_tsk_thread_flag(p, TIF_NEED_RESCHED);
1190
1191 cpu = task_cpu(p);
1192 if (cpu == smp_processor_id())
1193 return;
1194
1195 /* NEED_RESCHED must be visible before we test polling */
1196 smp_mb();
1197 if (!tsk_is_polling(p))
1198 smp_send_reschedule(cpu);
1199 }
1200
1201 static void resched_cpu(int cpu)
1202 {
1203 struct rq *rq = cpu_rq(cpu);
1204 unsigned long flags;
1205
1206 if (!spin_trylock_irqsave(&rq->lock, flags))
1207 return;
1208 resched_task(cpu_curr(cpu));
1209 spin_unlock_irqrestore(&rq->lock, flags);
1210 }
1211
1212 #ifdef CONFIG_NO_HZ
1213 /*
1214 * When add_timer_on() enqueues a timer into the timer wheel of an
1215 * idle CPU then this timer might expire before the next timer event
1216 * which is scheduled to wake up that CPU. In case of a completely
1217 * idle system the next event might even be infinite time into the
1218 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1219 * leaves the inner idle loop so the newly added timer is taken into
1220 * account when the CPU goes back to idle and evaluates the timer
1221 * wheel for the next timer event.
1222 */
1223 void wake_up_idle_cpu(int cpu)
1224 {
1225 struct rq *rq = cpu_rq(cpu);
1226
1227 if (cpu == smp_processor_id())
1228 return;
1229
1230 /*
1231 * This is safe, as this function is called with the timer
1232 * wheel base lock of (cpu) held. When the CPU is on the way
1233 * to idle and has not yet set rq->curr to idle then it will
1234 * be serialized on the timer wheel base lock and take the new
1235 * timer into account automatically.
1236 */
1237 if (rq->curr != rq->idle)
1238 return;
1239
1240 /*
1241 * We can set TIF_RESCHED on the idle task of the other CPU
1242 * lockless. The worst case is that the other CPU runs the
1243 * idle task through an additional NOOP schedule()
1244 */
1245 set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED);
1246
1247 /* NEED_RESCHED must be visible before we test polling */
1248 smp_mb();
1249 if (!tsk_is_polling(rq->idle))
1250 smp_send_reschedule(cpu);
1251 }
1252 #endif /* CONFIG_NO_HZ */
1253
1254 #else /* !CONFIG_SMP */
1255 static void resched_task(struct task_struct *p)
1256 {
1257 assert_spin_locked(&task_rq(p)->lock);
1258 set_tsk_need_resched(p);
1259 }
1260 #endif /* CONFIG_SMP */
1261
1262 #if BITS_PER_LONG == 32
1263 # define WMULT_CONST (~0UL)
1264 #else
1265 # define WMULT_CONST (1UL << 32)
1266 #endif
1267
1268 #define WMULT_SHIFT 32
1269
1270 /*
1271 * Shift right and round:
1272 */
1273 #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
1274
1275 /*
1276 * delta *= weight / lw
1277 */
1278 static unsigned long
1279 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1280 struct load_weight *lw)
1281 {
1282 u64 tmp;
1283
1284 if (!lw->inv_weight) {
1285 if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
1286 lw->inv_weight = 1;
1287 else
1288 lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
1289 / (lw->weight+1);
1290 }
1291
1292 tmp = (u64)delta_exec * weight;
1293 /*
1294 * Check whether we'd overflow the 64-bit multiplication:
1295 */
1296 if (unlikely(tmp > WMULT_CONST))
1297 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
1298 WMULT_SHIFT/2);
1299 else
1300 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
1301
1302 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
1303 }
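/*
 * Arithmetic sketch of the fixed-point math above (example values only):
 * inv_weight approximates 2^32 / lw->weight, so with delta_exec = 1000000,
 * weight = 1024 (NICE_0_LOAD) and lw->weight = 2048:
 *
 *	tmp = 1000000 * 1024 = 1024000000	(below WMULT_CONST, no overflow)
 *	inv_weight ~= 2^32 / 2048 = 2097152
 *	SRR(tmp * inv_weight, 32) ~= 1024000000 / 2048 = 500000
 *
 * i.e. delta_exec scaled by weight / lw->weight, here exactly one half.
 */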
1304
1305 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
1306 {
1307 lw->weight += inc;
1308 lw->inv_weight = 0;
1309 }
1310
1311 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
1312 {
1313 lw->weight -= dec;
1314 lw->inv_weight = 0;
1315 }
1316
1317 /*
1318 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1319 * of tasks with abnormal "nice" values across CPUs the contribution that
1320 * each task makes to its run queue's load is weighted according to its
1321 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
1322 * scaled version of the new time slice allocation that they receive on time
1323 * slice expiry etc.
1324 */
1325
1326 #define WEIGHT_IDLEPRIO 3
1327 #define WMULT_IDLEPRIO 1431655765
1328
1329 /*
1330 * Nice levels are multiplicative, with a gentle 10% change for every
1331 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1332 * nice 1, it will get ~10% less CPU time than another CPU-bound task
1333 * that remained on nice 0.
1334 *
1335 * The "10% effect" is relative and cumulative: from _any_ nice level,
1336 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
1337 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
1338 * If a task goes up by ~10% and another task goes down by ~10% then
1339 * the relative distance between them is ~25%.)
1340 */
1341 static const int prio_to_weight[40] = {
1342 /* -20 */ 88761, 71755, 56483, 46273, 36291,
1343 /* -15 */ 29154, 23254, 18705, 14949, 11916,
1344 /* -10 */ 9548, 7620, 6100, 4904, 3906,
1345 /* -5 */ 3121, 2501, 1991, 1586, 1277,
1346 /* 0 */ 1024, 820, 655, 526, 423,
1347 /* 5 */ 335, 272, 215, 172, 137,
1348 /* 10 */ 110, 87, 70, 56, 45,
1349 /* 15 */ 36, 29, 23, 18, 15,
1350 };
1351
1352 /*
1353 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1354 *
1355 * In cases where the weight does not change often, we can use the
1356 * precalculated inverse to speed up arithmetics by turning divisions
1357 * into multiplications:
1358 */
1359 static const u32 prio_to_wmult[40] = {
1360 /* -20 */ 48388, 59856, 76040, 92818, 118348,
1361 /* -15 */ 147320, 184698, 229616, 287308, 360437,
1362 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
1363 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
1364 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
1365 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
1366 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
1367 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
1368 };
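/*
 * Quick sanity check of the tables (example numbers, not used by the code):
 * two CPU-bound tasks at nice 0 and nice 1 have weights 1024 and 820, so they
 * receive roughly 1024/1844 ~= 55% and 820/1844 ~= 45% of the CPU - the ~10%
 * effect described above. Likewise prio_to_wmult[20] == 4194304, which is
 * 2^32 / 1024, matching the "inverse weight" definition.
 */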
1369
1370 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup);
1371
1372 /*
1373 * runqueue iterator, to support SMP load-balancing between different
1374 * scheduling classes, without having to expose their internal data
1375 * structures to the load-balancing proper:
1376 */
1377 struct rq_iterator {
1378 void *arg;
1379 struct task_struct *(*start)(void *);
1380 struct task_struct *(*next)(void *);
1381 };
1382
1383 #ifdef CONFIG_SMP
1384 static unsigned long
1385 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
1386 unsigned long max_load_move, struct sched_domain *sd,
1387 enum cpu_idle_type idle, int *all_pinned,
1388 int *this_best_prio, struct rq_iterator *iterator);
1389
1390 static int
1391 iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
1392 struct sched_domain *sd, enum cpu_idle_type idle,
1393 struct rq_iterator *iterator);
1394 #endif
1395
1396 #ifdef CONFIG_CGROUP_CPUACCT
1397 static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
1398 #else
1399 static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
1400 #endif
1401
1402 static inline void inc_cpu_load(struct rq *rq, unsigned long load)
1403 {
1404 update_load_add(&rq->load, load);
1405 }
1406
1407 static inline void dec_cpu_load(struct rq *rq, unsigned long load)
1408 {
1409 update_load_sub(&rq->load, load);
1410 }
1411
1412 #if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
1413 typedef int (*tg_visitor)(struct task_group *, void *);
1414
1415 /*
1416 * Iterate the full tree, calling @down when first entering a node and @up when
1417 * leaving it for the final time.
1418 */
1419 static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
1420 {
1421 struct task_group *parent, *child;
1422 int ret;
1423
1424 rcu_read_lock();
1425 parent = &root_task_group;
1426 down:
1427 ret = (*down)(parent, data);
1428 if (ret)
1429 goto out_unlock;
1430 list_for_each_entry_rcu(child, &parent->children, siblings) {
1431 parent = child;
1432 goto down;
1433
1434 up:
1435 continue;
1436 }
1437 ret = (*up)(parent, data);
1438 if (ret)
1439 goto out_unlock;
1440
1441 child = parent;
1442 parent = parent->parent;
1443 if (parent)
1444 goto up;
1445 out_unlock:
1446 rcu_read_unlock();
1447
1448 return ret;
1449 }
1450
1451 static int tg_nop(struct task_group *tg, void *data)
1452 {
1453 return 0;
1454 }
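/*
 * Usage sketch for walk_tg_tree() (illustrative; the real calls are in
 * update_shares() and update_h_load() below): @down runs when a group is
 * first entered (pre-order), @up when it is left for the last time
 * (post-order), and a non-zero return aborts the walk. A hypothetical
 * group counter could look like:
 *
 *	static int tg_count(struct task_group *tg, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr_groups = 0;
 *	walk_tg_tree(tg_count, tg_nop, &nr_groups);
 */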
1455 #endif
1456
1457 #ifdef CONFIG_SMP
1458 static unsigned long source_load(int cpu, int type);
1459 static unsigned long target_load(int cpu, int type);
1460 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
1461
1462 static unsigned long cpu_avg_load_per_task(int cpu)
1463 {
1464 struct rq *rq = cpu_rq(cpu);
1465 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
1466
1467 if (nr_running)
1468 rq->avg_load_per_task = rq->load.weight / nr_running;
1469 else
1470 rq->avg_load_per_task = 0;
1471
1472 return rq->avg_load_per_task;
1473 }
1474
1475 #ifdef CONFIG_FAIR_GROUP_SCHED
1476
1477 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
1478
1479 /*
1480 * Calculate and set the cpu's group shares.
1481 */
1482 static void
1483 update_group_shares_cpu(struct task_group *tg, int cpu,
1484 unsigned long sd_shares, unsigned long sd_rq_weight)
1485 {
1486 unsigned long shares;
1487 unsigned long rq_weight;
1488
1489 if (!tg->se[cpu])
1490 return;
1491
1492 rq_weight = tg->cfs_rq[cpu]->rq_weight;
1493
1494 /*
1495 * \Sum shares * rq_weight
1496 * shares = -----------------------
1497 * \Sum rq_weight
1498 *
1499 */
1500 shares = (sd_shares * rq_weight) / sd_rq_weight;
1501 shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
1502
1503 if (abs(shares - tg->se[cpu]->load.weight) >
1504 sysctl_sched_shares_thresh) {
1505 struct rq *rq = cpu_rq(cpu);
1506 unsigned long flags;
1507
1508 spin_lock_irqsave(&rq->lock, flags);
1509 tg->cfs_rq[cpu]->shares = shares;
1510
1511 __set_se_shares(tg->se[cpu], shares);
1512 spin_unlock_irqrestore(&rq->lock, flags);
1513 }
1514 }
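/*
 * Numeric sketch of the formula above (made-up values): with sd_shares ==
 * 1024, a domain-wide sd_rq_weight of 4096 and this cpu contributing
 * rq_weight == 1024 of it, the cpu gets shares = 1024 * 1024 / 4096 == 256,
 * i.e. a quarter of the group's shares. The result is clamped to
 * [MIN_SHARES, MAX_SHARES] and only written back when it differs from the
 * current se weight by more than sysctl_sched_shares_thresh.
 */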
1515
1516 /*
1517 * Re-compute the task group's per-cpu shares over the given domain.
1518 * This needs to be done in a bottom-up fashion because the rq weight of a
1519 * parent group depends on the shares of its child groups.
1520 */
1521 static int tg_shares_up(struct task_group *tg, void *data)
1522 {
1523 unsigned long weight, rq_weight = 0;
1524 unsigned long shares = 0;
1525 struct sched_domain *sd = data;
1526 int i;
1527
1528 for_each_cpu(i, sched_domain_span(sd)) {
1529 /*
1530 * If there are currently no tasks on the cpu pretend there
1531 * is one of average load so that when a new task gets to
1532 * run here it will not get delayed by group starvation.
1533 */
1534 weight = tg->cfs_rq[i]->load.weight;
1535 if (!weight)
1536 weight = NICE_0_LOAD;
1537
1538 tg->cfs_rq[i]->rq_weight = weight;
1539 rq_weight += weight;
1540 shares += tg->cfs_rq[i]->shares;
1541 }
1542
1543 if ((!shares && rq_weight) || shares > tg->shares)
1544 shares = tg->shares;
1545
1546 if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
1547 shares = tg->shares;
1548
1549 for_each_cpu(i, sched_domain_span(sd))
1550 update_group_shares_cpu(tg, i, shares, rq_weight);
1551
1552 return 0;
1553 }
1554
1555 /*
1556 * Compute the cpu's hierarchical load factor for each task group.
1557 * This needs to be done in a top-down fashion because the load of a child
1558 * group is a fraction of its parent's load.
1559 */
1560 static int tg_load_down(struct task_group *tg, void *data)
1561 {
1562 unsigned long load;
1563 long cpu = (long)data;
1564
1565 if (!tg->parent) {
1566 load = cpu_rq(cpu)->load.weight;
1567 } else {
1568 load = tg->parent->cfs_rq[cpu]->h_load;
1569 load *= tg->cfs_rq[cpu]->shares;
1570 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
1571 }
1572
1573 tg->cfs_rq[cpu]->h_load = load;
1574
1575 return 0;
1576 }
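/*
 * Example of the top-down h_load computation (illustrative numbers): if the
 * parent cfs_rq on this cpu has h_load == 3072 and load.weight == 3072, and
 * the child group's se holds shares == 1024 of that weight, the child's
 * h_load becomes 3072 * 1024 / 3073 ~= 1023, i.e. roughly a third of the
 * cpu's load is attributed to that group; its own children then split that
 * further in the same way. The "+ 1" in the divisor only guards against a
 * zero parent weight.
 */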
1577
1578 static void update_shares(struct sched_domain *sd)
1579 {
1580 u64 now = cpu_clock(raw_smp_processor_id());
1581 s64 elapsed = now - sd->last_update;
1582
1583 if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
1584 sd->last_update = now;
1585 walk_tg_tree(tg_nop, tg_shares_up, sd);
1586 }
1587 }
1588
1589 static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1590 {
1591 spin_unlock(&rq->lock);
1592 update_shares(sd);
1593 spin_lock(&rq->lock);
1594 }
1595
1596 static void update_h_load(long cpu)
1597 {
1598 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
1599 }
1600
1601 #else
1602
1603 static inline void update_shares(struct sched_domain *sd)
1604 {
1605 }
1606
1607 static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1608 {
1609 }
1610
1611 #endif
1612
1613 /*
1614 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1615 */
1616 static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1617 __releases(this_rq->lock)
1618 __acquires(busiest->lock)
1619 __acquires(this_rq->lock)
1620 {
1621 int ret = 0;
1622
1623 if (unlikely(!irqs_disabled())) {
1624 /* printk() doesn't work well under rq->lock */
1625 spin_unlock(&this_rq->lock);
1626 BUG_ON(1);
1627 }
1628 if (unlikely(!spin_trylock(&busiest->lock))) {
1629 if (busiest < this_rq) {
1630 spin_unlock(&this_rq->lock);
1631 spin_lock(&busiest->lock);
1632 spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
1633 ret = 1;
1634 } else
1635 spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
1636 }
1637 return ret;
1638 }
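/*
 * The ordering rule above is the usual address-based ABBA avoidance: both
 * sides always end up taking the lower-addressed rq lock first. Sketch
 * (hypothetical CPUs): if cpu0's rq sits at a lower address than cpu1's,
 * then cpu1 balancing against cpu0 drops its own lock, takes cpu0's, and
 * retakes its own nested, while cpu0 balancing against cpu1 can take cpu1's
 * lock nested without dropping anything.
 */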
1639
1640 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1641 __releases(busiest->lock)
1642 {
1643 spin_unlock(&busiest->lock);
1644 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1645 }
1646 #endif
1647
1648 #ifdef CONFIG_FAIR_GROUP_SCHED
1649 static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
1650 {
1651 #ifdef CONFIG_SMP
1652 cfs_rq->shares = shares;
1653 #endif
1654 }
1655 #endif
1656
1657 #include "sched_stats.h"
1658 #include "sched_idletask.c"
1659 #include "sched_fair.c"
1660 #include "sched_rt.c"
1661 #ifdef CONFIG_SCHED_DEBUG
1662 # include "sched_debug.c"
1663 #endif
1664
1665 #define sched_class_highest (&rt_sched_class)
1666 #define for_each_class(class) \
1667 for (class = sched_class_highest; class; class = class->next)
1668
1669 static void inc_nr_running(struct rq *rq)
1670 {
1671 rq->nr_running++;
1672 }
1673
1674 static void dec_nr_running(struct rq *rq)
1675 {
1676 rq->nr_running--;
1677 }
1678
1679 static void set_load_weight(struct task_struct *p)
1680 {
1681 if (task_has_rt_policy(p)) {
1682 p->se.load.weight = prio_to_weight[0] * 2;
1683 p->se.load.inv_weight = prio_to_wmult[0] >> 1;
1684 return;
1685 }
1686
1687 /*
1688 * SCHED_IDLE tasks get minimal weight:
1689 */
1690 if (p->policy == SCHED_IDLE) {
1691 p->se.load.weight = WEIGHT_IDLEPRIO;
1692 p->se.load.inv_weight = WMULT_IDLEPRIO;
1693 return;
1694 }
1695
1696 p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
1697 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
1698 }
1699
1700 static void update_avg(u64 *avg, u64 sample)
1701 {
1702 s64 diff = sample - *avg;
1703 *avg += diff >> 3;
1704 }
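/*
 * update_avg() is an exponentially weighted moving average with a 1/8 weight
 * for the new sample: avg_new ~= 7/8 * avg + 1/8 * sample (integer shift, so
 * it truncates). E.g. avg == 800 and sample == 1600 give diff == 800,
 * diff >> 3 == 100, so avg becomes 900.
 */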
1705
1706 static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
1707 {
1708 sched_info_queued(p);
1709 p->sched_class->enqueue_task(rq, p, wakeup);
1710 p->se.on_rq = 1;
1711 }
1712
1713 static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
1714 {
1715 if (sleep && p->se.last_wakeup) {
1716 update_avg(&p->se.avg_overlap,
1717 p->se.sum_exec_runtime - p->se.last_wakeup);
1718 p->se.last_wakeup = 0;
1719 }
1720
1721 sched_info_dequeued(p);
1722 p->sched_class->dequeue_task(rq, p, sleep);
1723 p->se.on_rq = 0;
1724 }
1725
1726 /*
1727 * __normal_prio - return the priority that is based on the static prio
1728 */
1729 static inline int __normal_prio(struct task_struct *p)
1730 {
1731 return p->static_prio;
1732 }
1733
1734 /*
1735 * Calculate the expected normal priority: i.e. priority
1736 * without taking RT-inheritance into account. Might be
1737 * boosted by interactivity modifiers. Changes upon fork,
1738 * setprio syscalls, and whenever the interactivity
1739 * estimator recalculates.
1740 */
1741 static inline int normal_prio(struct task_struct *p)
1742 {
1743 int prio;
1744
1745 if (task_has_rt_policy(p))
1746 prio = MAX_RT_PRIO-1 - p->rt_priority;
1747 else
1748 prio = __normal_prio(p);
1749 return prio;
1750 }
1751
1752 /*
1753 * Calculate the current priority, i.e. the priority
1754 * taken into account by the scheduler. This value might
1755 * be boosted by RT tasks, or might be boosted by
1756 * interactivity modifiers. Will be RT if the task got
1757 * RT-boosted. If not then it returns p->normal_prio.
1758 */
1759 static int effective_prio(struct task_struct *p)
1760 {
1761 p->normal_prio = normal_prio(p);
1762 /*
1763 * If we are RT tasks or we were boosted to RT priority,
1764 * keep the priority unchanged. Otherwise, update priority
1765 * to the normal priority:
1766 */
1767 if (!rt_prio(p->prio))
1768 return p->normal_prio;
1769 return p->prio;
1770 }
1771
1772 /*
1773 * activate_task - move a task to the runqueue.
1774 */
1775 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
1776 {
1777 if (task_contributes_to_load(p))
1778 rq->nr_uninterruptible--;
1779
1780 enqueue_task(rq, p, wakeup);
1781 inc_nr_running(rq);
1782 }
1783
1784 /*
1785 * deactivate_task - remove a task from the runqueue.
1786 */
1787 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
1788 {
1789 if (task_contributes_to_load(p))
1790 rq->nr_uninterruptible++;
1791
1792 dequeue_task(rq, p, sleep);
1793 dec_nr_running(rq);
1794 }
1795
1796 /**
1797 * task_curr - is this task currently executing on a CPU?
1798 * @p: the task in question.
1799 */
1800 inline int task_curr(const struct task_struct *p)
1801 {
1802 return cpu_curr(task_cpu(p)) == p;
1803 }
1804
1805 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1806 {
1807 set_task_rq(p, cpu);
1808 #ifdef CONFIG_SMP
1809 /*
1810 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1811 * successfully executed on another CPU. We must ensure that updates of
1812 * per-task data have been completed by this moment.
1813 */
1814 smp_wmb();
1815 task_thread_info(p)->cpu = cpu;
1816 #endif
1817 }
1818
1819 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1820 const struct sched_class *prev_class,
1821 int oldprio, int running)
1822 {
1823 if (prev_class != p->sched_class) {
1824 if (prev_class->switched_from)
1825 prev_class->switched_from(rq, p, running);
1826 p->sched_class->switched_to(rq, p, running);
1827 } else
1828 p->sched_class->prio_changed(rq, p, oldprio, running);
1829 }
1830
1831 #ifdef CONFIG_SMP
1832
1833 /* Used instead of source_load when we know the type == 0 */
1834 static unsigned long weighted_cpuload(const int cpu)
1835 {
1836 return cpu_rq(cpu)->load.weight;
1837 }
1838
1839 /*
1840 * Is this task likely cache-hot:
1841 */
1842 static int
1843 task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
1844 {
1845 s64 delta;
1846
1847 /*
1848 * Buddy candidates are cache hot:
1849 */
1850 if (sched_feat(CACHE_HOT_BUDDY) &&
1851 (&p->se == cfs_rq_of(&p->se)->next ||
1852 &p->se == cfs_rq_of(&p->se)->last))
1853 return 1;
1854
1855 if (p->sched_class != &fair_sched_class)
1856 return 0;
1857
1858 if (sysctl_sched_migration_cost == -1)
1859 return 1;
1860 if (sysctl_sched_migration_cost == 0)
1861 return 0;
1862
1863 delta = now - p->se.exec_start;
1864
1865 return delta < (s64)sysctl_sched_migration_cost;
1866 }
1867
1868
1869 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
1870 {
1871 int old_cpu = task_cpu(p);
1872 struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
1873 struct cfs_rq *old_cfsrq = task_cfs_rq(p),
1874 *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
1875 u64 clock_offset;
1876
1877 clock_offset = old_rq->clock - new_rq->clock;
1878
1879 trace_sched_migrate_task(p, task_cpu(p), new_cpu);
1880
1881 #ifdef CONFIG_SCHEDSTATS
1882 if (p->se.wait_start)
1883 p->se.wait_start -= clock_offset;
1884 if (p->se.sleep_start)
1885 p->se.sleep_start -= clock_offset;
1886 if (p->se.block_start)
1887 p->se.block_start -= clock_offset;
1888 if (old_cpu != new_cpu) {
1889 schedstat_inc(p, se.nr_migrations);
1890 if (task_hot(p, old_rq->clock, NULL))
1891 schedstat_inc(p, se.nr_forced2_migrations);
1892 }
1893 #endif
1894 p->se.vruntime -= old_cfsrq->min_vruntime -
1895 new_cfsrq->min_vruntime;
1896
1897 __set_task_cpu(p, new_cpu);
1898 }
1899
1900 struct migration_req {
1901 struct list_head list;
1902
1903 struct task_struct *task;
1904 int dest_cpu;
1905
1906 struct completion done;
1907 };
1908
1909 /*
1910 * The task's runqueue lock must be held.
1911 * Returns true if you have to wait for migration thread.
1912 */
1913 static int
1914 migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
1915 {
1916 struct rq *rq = task_rq(p);
1917
1918 /*
1919 * If the task is not on a runqueue (and not running), then
1920 * it is sufficient to simply update the task's cpu field.
1921 */
1922 if (!p->se.on_rq && !task_running(rq, p)) {
1923 set_task_cpu(p, dest_cpu);
1924 return 0;
1925 }
1926
1927 init_completion(&req->done);
1928 req->task = p;
1929 req->dest_cpu = dest_cpu;
1930 list_add(&req->list, &rq->migration_queue);
1931
1932 return 1;
1933 }
1934
1935 /*
1936 * wait_task_inactive - wait for a thread to unschedule.
1937 *
1938 * If @match_state is nonzero, it's the @p->state value just checked and
1939 * not expected to change. If it changes, i.e. @p might have woken up,
1940 * then return zero. When we succeed in waiting for @p to be off its CPU,
1941 * we return a positive number (its total switch count). If a second call
1942 * a short while later returns the same number, the caller can be sure that
1943 * @p has remained unscheduled the whole time.
1944 *
1945 * The caller must ensure that the task *will* unschedule sometime soon,
1946 * else this function might spin for a *long* time. This function can't
1947 * be called with interrupts off, or it may introduce deadlock with
1948 * smp_call_function() if an IPI is sent by the same process we are
1949 * waiting to become inactive.
1950 */
1951 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1952 {
1953 unsigned long flags;
1954 int running, on_rq;
1955 unsigned long ncsw;
1956 struct rq *rq;
1957
1958 for (;;) {
1959 /*
1960 * We do the initial early heuristics without holding
1961 * any task-queue locks at all. We'll only try to get
1962 * the runqueue lock when things look like they will
1963 * work out!
1964 */
1965 rq = task_rq(p);
1966
1967 /*
1968 * If the task is actively running on another CPU
1969 * still, just relax and busy-wait without holding
1970 * any locks.
1971 *
1972 * NOTE! Since we don't hold any locks, we can't even be
1973 * sure that "rq" stays the right runqueue!
1974 * But we don't care, since "task_running()" will
1975 * return false if the runqueue has changed and p
1976 * is actually now running somewhere else!
1977 */
1978 while (task_running(rq, p)) {
1979 if (match_state && unlikely(p->state != match_state))
1980 return 0;
1981 cpu_relax();
1982 }
1983
1984 /*
1985 * Ok, time to look more closely! We need the rq
1986 * lock now, to be *sure*. If we're wrong, we'll
1987 * just go back and repeat.
1988 */
1989 rq = task_rq_lock(p, &flags);
1990 trace_sched_wait_task(rq, p);
1991 running = task_running(rq, p);
1992 on_rq = p->se.on_rq;
1993 ncsw = 0;
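/*
 * ORing in LONG_MIN makes ncsw non-zero even if nvcsw happens to be
 * zero, so "!ncsw" below unambiguously means "the state no longer
 * matched", never "no context switches have happened yet".
 */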
1994 if (!match_state || p->state == match_state)
1995 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
1996 task_rq_unlock(rq, &flags);
1997
1998 /*
1999 * If it changed from the expected state, bail out now.
2000 */
2001 if (unlikely(!ncsw))
2002 break;
2003
2004 /*
2005 * Was it really running after all now that we
2006 * checked with the proper locks actually held?
2007 *
2008 * Oops. Go back and try again..
2009 */
2010 if (unlikely(running)) {
2011 cpu_relax();
2012 continue;
2013 }
2014
2015 /*
2016 * It's not enough that it's not actively running,
2017 * it must be off the runqueue _entirely_, and not
2018 * preempted!
2019 *
2020 * So if it was still runnable (but just not actively
2021 * running right now), it's preempted, and we should
2022 * yield - it could be a while.
2023 */
2024 if (unlikely(on_rq)) {
2025 schedule_timeout_uninterruptible(1);
2026 continue;
2027 }
2028
2029 /*
2030 * Ahh, all good. It wasn't running, and it wasn't
2031 * runnable, which means that it will never become
2032 * running in the future either. We're all done!
2033 */
2034 break;
2035 }
2036
2037 return ncsw;
2038 }
2039
2040 /***
2041 * kick_process - kick a running thread to enter/exit the kernel
2042 * @p: the to-be-kicked thread
2043 *
2044 * Cause a process which is running on another CPU to enter
2045 * kernel-mode, without any delay. (to get signals handled.)
2046 *
2047 * NOTE: this function doesn't have to take the runqueue lock,
2048 * because all it wants to ensure is that the remote task enters
2049 * the kernel. If the IPI races and the task has been migrated
2050 * to another CPU then no harm is done and the purpose has been
2051 * achieved as well.
2052 */
2053 void kick_process(struct task_struct *p)
2054 {
2055 int cpu;
2056
2057 preempt_disable();
2058 cpu = task_cpu(p);
2059 if ((cpu != smp_processor_id()) && task_curr(p))
2060 smp_send_reschedule(cpu);
2061 preempt_enable();
2062 }
2063
2064 /*
2065 * Return a low guess at the load of a migration-source cpu weighted
2066 * according to the scheduling class and "nice" value.
2067 *
2068 * We want to under-estimate the load of migration sources, to
2069 * balance conservatively.
2070 */
2071 static unsigned long source_load(int cpu, int type)
2072 {
2073 struct rq *rq = cpu_rq(cpu);
2074 unsigned long total = weighted_cpuload(cpu);
2075
2076 if (type == 0 || !sched_feat(LB_BIAS))
2077 return total;
2078
2079 return min(rq->cpu_load[type-1], total);
2080 }
2081
2082 /*
2083 * Return a high guess at the load of a migration-target cpu weighted
2084 * according to the scheduling class and "nice" value.
2085 */
2086 static unsigned long target_load(int cpu, int type)
2087 {
2088 struct rq *rq = cpu_rq(cpu);
2089 unsigned long total = weighted_cpuload(cpu);
2090
2091 if (type == 0 || !sched_feat(LB_BIAS))
2092 return total;
2093
2094 return max(rq->cpu_load[type-1], total);
2095 }
2096
2097 /*
2098 * find_idlest_group finds and returns the least busy CPU group within the
2099 * domain.
2100 */
2101 static struct sched_group *
2102 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
2103 {
2104 struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
2105 unsigned long min_load = ULONG_MAX, this_load = 0;
2106 int load_idx = sd->forkexec_idx;
2107 int imbalance = 100 + (sd->imbalance_pct-100)/2;
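/*
 * Example: with an imbalance_pct of 125 this works out to 112, so a
 * remote group is only returned (see the check at the bottom of this
 * function) when its load is at least ~11% below the local group's.
 */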
2108
2109 do {
2110 unsigned long load, avg_load;
2111 int local_group;
2112 int i;
2113
2114 /* Skip over this group if it has no CPUs allowed */
2115 if (!cpumask_intersects(sched_group_cpus(group),
2116 &p->cpus_allowed))
2117 continue;
2118
2119 local_group = cpumask_test_cpu(this_cpu,
2120 sched_group_cpus(group));
2121
2122 /* Tally up the load of all CPUs in the group */
2123 avg_load = 0;
2124
2125 for_each_cpu(i, sched_group_cpus(group)) {
2126 /* Bias balancing toward cpus of our domain */
2127 if (local_group)
2128 load = source_load(i, load_idx);
2129 else
2130 load = target_load(i, load_idx);
2131
2132 avg_load += load;
2133 }
2134
2135 /* Adjust by relative CPU power of the group */
2136 avg_load = sg_div_cpu_power(group,
2137 avg_load * SCHED_LOAD_SCALE);
2138
2139 if (local_group) {
2140 this_load = avg_load;
2141 this = group;
2142 } else if (avg_load < min_load) {
2143 min_load = avg_load;
2144 idlest = group;
2145 }
2146 } while (group = group->next, group != sd->groups);
2147
2148 if (!idlest || 100*this_load < imbalance*min_load)
2149 return NULL;
2150 return idlest;
2151 }
2152
2153 /*
2154 * find_idlest_cpu - find the idlest cpu among the cpus in group.
2155 */
2156 static int
2157 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
2158 {
2159 unsigned long load, min_load = ULONG_MAX;
2160 int idlest = -1;
2161 int i;
2162
2163 /* Traverse only the allowed CPUs */
2164 for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
2165 load = weighted_cpuload(i);
2166
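/* On a tie, prefer this_cpu so we avoid a pointless migration. */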
2167 if (load < min_load || (load == min_load && i == this_cpu)) {
2168 min_load = load;
2169 idlest = i;
2170 }
2171 }
2172
2173 return idlest;
2174 }
2175
2176 /*
2177 * sched_balance_self: balance the current task (running on cpu) in domains
2178 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
2179 * SD_BALANCE_EXEC.
2180 *
2181 * Balance, ie. select the least loaded group.
2182 *
2183 * Returns the target CPU number, or the same CPU if no balancing is needed.
2184 *
2185 * preempt must be disabled.
2186 */
2187 static int sched_balance_self(int cpu, int flag)
2188 {
2189 struct task_struct *t = current;
2190 struct sched_domain *tmp, *sd = NULL;
2191
2192 for_each_domain(cpu, tmp) {
2193 /*
2194 * If power savings logic is enabled for a domain, stop there.
2195 */
2196 if (tmp->flags & SD_POWERSAVINGS_BALANCE)
2197 break;
2198 if (tmp->flags & flag)
2199 sd = tmp;
2200 }
2201
2202 if (sd)
2203 update_shares(sd);
2204
2205 while (sd) {
2206 struct sched_group *group;
2207 int new_cpu, weight;
2208
2209 if (!(sd->flags & flag)) {
2210 sd = sd->child;
2211 continue;
2212 }
2213
2214 group = find_idlest_group(sd, t, cpu);
2215 if (!group) {
2216 sd = sd->child;
2217 continue;
2218 }
2219
2220 new_cpu = find_idlest_cpu(group, t, cpu);
2221 if (new_cpu == -1 || new_cpu == cpu) {
2222 /* Now try balancing at a lower domain level of cpu */
2223 sd = sd->child;
2224 continue;
2225 }
2226
2227 /* Now try balancing at a lower domain level of new_cpu */
2228 cpu = new_cpu;
2229 weight = cpumask_weight(sched_domain_span(sd));
2230 sd = NULL;
2231 for_each_domain(cpu, tmp) {
2232 if (weight <= cpumask_weight(sched_domain_span(tmp)))
2233 break;
2234 if (tmp->flags & flag)
2235 sd = tmp;
2236 }
2237 /* while loop will break here if sd == NULL */
2238 }
2239
2240 return cpu;
2241 }
2242
2243 #endif /* CONFIG_SMP */
2244
2245 /***
2246 * try_to_wake_up - wake up a thread
2247 * @p: the to-be-woken-up thread
2248 * @state: the mask of task states that can be woken
2249 * @sync: do a synchronous wakeup?
2250 *
2251 * Put it on the run-queue if it's not already there. The "current"
2252 * thread is always on the run-queue (except when the actual
2253 * re-schedule is in progress), and as such you're allowed to do
2254 * the simpler "current->state = TASK_RUNNING" to mark yourself
2255 * runnable without the overhead of this.
2256 *
2257 * returns failure only if the task is already active.
2258 */
2259 static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
2260 {
2261 int cpu, orig_cpu, this_cpu, success = 0;
2262 unsigned long flags;
2263 long old_state;
2264 struct rq *rq;
2265
2266 if (!sched_feat(SYNC_WAKEUPS))
2267 sync = 0;
2268
2269 #ifdef CONFIG_SMP
2270 if (sched_feat(LB_WAKEUP_UPDATE)) {
2271 struct sched_domain *sd;
2272
2273 this_cpu = raw_smp_processor_id();
2274 cpu = task_cpu(p);
2275
2276 for_each_domain(this_cpu, sd) {
2277 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
2278 update_shares(sd);
2279 break;
2280 }
2281 }
2282 }
2283 #endif
2284
2285 smp_wmb();
2286 rq = task_rq_lock(p, &flags);
2287 update_rq_clock(rq);
2288 old_state = p->state;
2289 if (!(old_state & state))
2290 goto out;
2291
2292 if (p->se.on_rq)
2293 goto out_running;
2294
2295 cpu = task_cpu(p);
2296 orig_cpu = cpu;
2297 this_cpu = smp_processor_id();
2298
2299 #ifdef CONFIG_SMP
2300 if (unlikely(task_running(rq, p)))
2301 goto out_activate;
2302
2303 cpu = p->sched_class->select_task_rq(p, sync);
2304 if (cpu != orig_cpu) {
2305 set_task_cpu(p, cpu);
2306 task_rq_unlock(rq, &flags);
2307 /* might preempt at this point */
2308 rq = task_rq_lock(p, &flags);
2309 old_state = p->state;
2310 if (!(old_state & state))
2311 goto out;
2312 if (p->se.on_rq)
2313 goto out_running;
2314
2315 this_cpu = smp_processor_id();
2316 cpu = task_cpu(p);
2317 }
2318
2319 #ifdef CONFIG_SCHEDSTATS
2320 schedstat_inc(rq, ttwu_count);
2321 if (cpu == this_cpu)
2322 schedstat_inc(rq, ttwu_local);
2323 else {
2324 struct sched_domain *sd;
2325 for_each_domain(this_cpu, sd) {
2326 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
2327 schedstat_inc(sd, ttwu_wake_remote);
2328 break;
2329 }
2330 }
2331 }
2332 #endif /* CONFIG_SCHEDSTATS */
2333
2334 out_activate:
2335 #endif /* CONFIG_SMP */
2336 schedstat_inc(p, se.nr_wakeups);
2337 if (sync)
2338 schedstat_inc(p, se.nr_wakeups_sync);
2339 if (orig_cpu != cpu)
2340 schedstat_inc(p, se.nr_wakeups_migrate);
2341 if (cpu == this_cpu)
2342 schedstat_inc(p, se.nr_wakeups_local);
2343 else
2344 schedstat_inc(p, se.nr_wakeups_remote);
2345 activate_task(rq, p, 1);
2346 success = 1;
2347
2348 out_running:
2349 trace_sched_wakeup(rq, p, success);
2350 check_preempt_curr(rq, p, sync);
2351
2352 p->state = TASK_RUNNING;
2353 #ifdef CONFIG_SMP
2354 if (p->sched_class->task_wake_up)
2355 p->sched_class->task_wake_up(rq, p);
2356 #endif
2357 out:
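/*
 * Record, in the waker's own execution-time clock, when it last woke
 * somebody up; the fair class uses this to maintain se.avg_overlap
 * for its wakeup heuristics.
 */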
2358 current->se.last_wakeup = current->se.sum_exec_runtime;
2359
2360 task_rq_unlock(rq, &flags);
2361
2362 return success;
2363 }
2364
2365 int wake_up_process(struct task_struct *p)
2366 {
2367 return try_to_wake_up(p, TASK_ALL, 0);
2368 }
2369 EXPORT_SYMBOL(wake_up_process);
2370
2371 int wake_up_state(struct task_struct *p, unsigned int state)
2372 {
2373 return try_to_wake_up(p, state, 0);
2374 }
2375
2376 /*
2377 * Perform scheduler related setup for a newly forked process p.
2378 * p is forked by current.
2379 *
2380 * __sched_fork() is basic setup used by init_idle() too:
2381 */
2382 static void __sched_fork(struct task_struct *p)
2383 {
2384 p->se.exec_start = 0;
2385 p->se.sum_exec_runtime = 0;
2386 p->se.prev_sum_exec_runtime = 0;
2387 p->se.last_wakeup = 0;
2388 p->se.avg_overlap = 0;
2389
2390 #ifdef CONFIG_SCHEDSTATS
2391 p->se.wait_start = 0;
2392 p->se.sum_sleep_runtime = 0;
2393 p->se.sleep_start = 0;
2394 p->se.block_start = 0;
2395 p->se.sleep_max = 0;
2396 p->se.block_max = 0;
2397 p->se.exec_max = 0;
2398 p->se.slice_max = 0;
2399 p->se.wait_max = 0;
2400 #endif
2401
2402 INIT_LIST_HEAD(&p->rt.run_list);
2403 p->se.on_rq = 0;
2404 INIT_LIST_HEAD(&p->se.group_node);
2405
2406 #ifdef CONFIG_PREEMPT_NOTIFIERS
2407 INIT_HLIST_HEAD(&p->preempt_notifiers);
2408 #endif
2409
2410 /*
2411 * We mark the process as running here, but have not actually
2412 * inserted it onto the runqueue yet. This guarantees that
2413 * nobody will actually run it, and a signal or other external
2414 * event cannot wake it up and insert it on the runqueue either.
2415 */
2416 p->state = TASK_RUNNING;
2417 }
2418
2419 /*
2420 * fork()/clone()-time setup:
2421 */
2422 void sched_fork(struct task_struct *p, int clone_flags)
2423 {
2424 int cpu = get_cpu();
2425
2426 __sched_fork(p);
2427
2428 #ifdef CONFIG_SMP
2429 cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
2430 #endif
2431 set_task_cpu(p, cpu);
2432
2433 /*
2434 * Make sure we do not leak PI boosting priority to the child:
2435 */
2436 p->prio = current->normal_prio;
2437 if (!rt_prio(p->prio))
2438 p->sched_class = &fair_sched_class;
2439
2440 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
2441 if (likely(sched_info_on()))
2442 memset(&p->sched_info, 0, sizeof(p->sched_info));
2443 #endif
2444 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
2445 p->oncpu = 0;
2446 #endif
2447 #ifdef CONFIG_PREEMPT
2448 /* Want to start with kernel preemption disabled. */
2449 task_thread_info(p)->preempt_count = 1;
2450 #endif
2451 put_cpu();
2452 }
2453
2454 /*
2455 * wake_up_new_task - wake up a newly created task for the first time.
2456 *
2457 * This function will do some initial scheduler statistics housekeeping
2458 * that must be done for every newly created context, then puts the task
2459 * on the runqueue and wakes it.
2460 */
2461 void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2462 {
2463 unsigned long flags;
2464 struct rq *rq;
2465
2466 rq = task_rq_lock(p, &flags);
2467 BUG_ON(p->state != TASK_RUNNING);
2468 update_rq_clock(rq);
2469
2470 p->prio = effective_prio(p);
2471
2472 if (!p->sched_class->task_new || !current->se.on_rq) {
2473 activate_task(rq, p, 0);
2474 } else {
2475 /*
2476 * Let the scheduling class do new task startup
2477 * management (if any):
2478 */
2479 p->sched_class->task_new(rq, p);
2480 inc_nr_running(rq);
2481 }
2482 trace_sched_wakeup_new(rq, p, 1);
2483 check_preempt_curr(rq, p, 0);
2484 #ifdef CONFIG_SMP
2485 if (p->sched_class->task_wake_up)
2486 p->sched_class->task_wake_up(rq, p);
2487 #endif
2488 task_rq_unlock(rq, &flags);
2489 }
2490
2491 #ifdef CONFIG_PREEMPT_NOTIFIERS
2492
2493 /**
2494 * preempt_notifier_register - tell me when current is being preempted & rescheduled
2495 * @notifier: notifier struct to register
2496 */
2497 void preempt_notifier_register(struct preempt_notifier *notifier)
2498 {
2499 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2500 }
2501 EXPORT_SYMBOL_GPL(preempt_notifier_register);
2502
2503 /**
2504 * preempt_notifier_unregister - no longer interested in preemption notifications
2505 * @notifier: notifier struct to unregister
2506 *
2507 * This is safe to call from within a preemption notifier.
2508 */
2509 void preempt_notifier_unregister(struct preempt_notifier *notifier)
2510 {
2511 hlist_del(&notifier->link);
2512 }
2513 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2514
2515 static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2516 {
2517 struct preempt_notifier *notifier;
2518 struct hlist_node *node;
2519
2520 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2521 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2522 }
2523
2524 static void
2525 fire_sched_out_preempt_notifiers(struct task_struct *curr,
2526 struct task_struct *next)
2527 {
2528 struct preempt_notifier *notifier;
2529 struct hlist_node *node;
2530
2531 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2532 notifier->ops->sched_out(notifier, next);
2533 }
2534
2535 #else /* !CONFIG_PREEMPT_NOTIFIERS */
2536
2537 static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2538 {
2539 }
2540
2541 static void
2542 fire_sched_out_preempt_notifiers(struct task_struct *curr,
2543 struct task_struct *next)
2544 {
2545 }
2546
2547 #endif /* CONFIG_PREEMPT_NOTIFIERS */
2548
2549 /**
2550 * prepare_task_switch - prepare to switch tasks
2551 * @rq: the runqueue preparing to switch
2552 * @prev: the current task that is being switched out
2553 * @next: the task we are going to switch to.
2554 *
2555 * This is called with the rq lock held and interrupts off. It must
2556 * be paired with a subsequent finish_task_switch after the context
2557 * switch.
2558 *
2559 * prepare_task_switch sets up locking and calls architecture specific
2560 * hooks.
2561 */
2562 static inline void
2563 prepare_task_switch(struct rq *rq, struct task_struct *prev,
2564 struct task_struct *next)
2565 {
2566 fire_sched_out_preempt_notifiers(prev, next);
2567 prepare_lock_switch(rq, next);
2568 prepare_arch_switch(next);
2569 }
2570
2571 /**
2572 * finish_task_switch - clean up after a task-switch
2573 * @rq: runqueue associated with task-switch
2574 * @prev: the thread we just switched away from.
2575 *
2576 * finish_task_switch must be called after the context switch, paired
2577 * with a prepare_task_switch call before the context switch.
2578 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2579 * and do any other architecture-specific cleanup actions.
2580 *
2581 * Note that we may have delayed dropping an mm in context_switch(). If
2582 * so, we finish that here outside of the runqueue lock. (Doing it
2583 * with the lock held can cause deadlocks; see schedule() for
2584 * details.)
2585 */
2586 static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2587 __releases(rq->lock)
2588 {
2589 struct mm_struct *mm = rq->prev_mm;
2590 long prev_state;
2591
2592 rq->prev_mm = NULL;
2593
2594 /*
2595 * A task struct has one reference for the use as "current".
2596 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
2597 * schedule one last time. The schedule call will never return, and
2598 * the scheduled task must drop that reference.
2599 * The test for TASK_DEAD must occur while the runqueue locks are
2600 * still held, otherwise prev could be scheduled on another cpu, die
2601 * there before we look at prev->state, and then the reference would
2602 * be dropped twice.
2603 * Manfred Spraul <manfred@colorfullife.com>
2604 */
2605 prev_state = prev->state;
2606 finish_arch_switch(prev);
2607 finish_lock_switch(rq, prev);
2608 #ifdef CONFIG_SMP
2609 if (current->sched_class->post_schedule)
2610 current->sched_class->post_schedule(rq);
2611 #endif
2612
2613 fire_sched_in_preempt_notifiers(current);
2614 if (mm)
2615 mmdrop(mm);
2616 if (unlikely(prev_state == TASK_DEAD)) {
2617 /*
2618 * Remove function-return probe instances associated with this
2619 * task and put them back on the free list.
2620 */
2621 kprobe_flush_task(prev);
2622 put_task_struct(prev);
2623 }
2624 }
2625
2626 /**
2627 * schedule_tail - first thing a freshly forked thread must call.
2628 * @prev: the thread we just switched away from.
2629 */
2630 asmlinkage void schedule_tail(struct task_struct *prev)
2631 __releases(rq->lock)
2632 {
2633 struct rq *rq = this_rq();
2634
2635 finish_task_switch(rq, prev);
2636 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
2637 /* In this case, finish_task_switch does not reenable preemption */
2638 preempt_enable();
2639 #endif
2640 if (current->set_child_tid)
2641 put_user(task_pid_vnr(current), current->set_child_tid);
2642 }
2643
2644 #ifdef CONFIG_QEMU_TRACE
2645 void qemu_trace_cs(struct task_struct *next);
2646 #endif
2647
2648 /*
2649 * context_switch - switch to the new MM and the new
2650 * thread's register state.
2651 */
2652 static inline void
2653 context_switch(struct rq *rq, struct task_struct *prev,
2654 struct task_struct *next)
2655 {
2656 struct mm_struct *mm, *oldmm;
2657
2658 prepare_task_switch(rq, prev, next);
2659 trace_sched_switch(rq, prev, next);
2660 mm = next->mm;
2661 oldmm = prev->active_mm;
2662 /*
2663 * For paravirt, this is coupled with an exit in switch_to to
2664 * combine the page table reload and the switch backend into
2665 * one hypercall.
2666 */
2667 arch_enter_lazy_cpu_mode();
2668
2669 if (unlikely(!mm)) {
2670 next->active_mm = oldmm;
2671 atomic_inc(&oldmm->mm_count);
2672 enter_lazy_tlb(oldmm, next);
2673 } else
2674 switch_mm(oldmm, mm, next);
2675
2676 if (unlikely(!prev->mm)) {
2677 prev->active_mm = NULL;
2678 rq->prev_mm = oldmm;
2679 }
2680 /*
2681 * The runqueue lock will be released by the next
2682 * task (which is an invalid locking op but in the case
2683 * of the scheduler it's an obvious special-case), so we
2684 * do an early lockdep release here:
2685 */
2686 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
2687 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2688 #endif
2689
2690 #ifdef CONFIG_QEMU_TRACE
2691 /* Emit a trace record for the context switch. */
2692 qemu_trace_cs(next);
2693 #endif
2694
2695 /* Here we just switch the register state and the stack. */
2696 switch_to(prev, next, prev);
2697
2698 barrier();
2699 /*
2700 * this_rq must be evaluated again because prev may have moved
2701 * CPUs since it called schedule(), thus the 'rq' on its stack
2702 * frame will be invalid.
2703 */
2704 finish_task_switch(this_rq(), prev);
2705 }
2706
2707 /*
2708 * nr_running, nr_uninterruptible and nr_context_switches:
2709 *
2710 * externally visible scheduler statistics: current number of runnable
2711 * threads, current number of uninterruptible-sleeping threads, total
2712 * number of context switches performed since bootup.
2713 */
2714 unsigned long nr_running(void)
2715 {
2716 unsigned long i, sum = 0;
2717
2718 for_each_online_cpu(i)
2719 sum += cpu_rq(i)->nr_running;
2720
2721 return sum;
2722 }
2723
2724 unsigned long nr_uninterruptible(void)
2725 {
2726 unsigned long i, sum = 0;
2727
2728 for_each_possible_cpu(i)
2729 sum += cpu_rq(i)->nr_uninterruptible;
2730
2731 /*
2732 * Since we read the counters lockless, it might be slightly
2733 * inaccurate. Do not allow it to go below zero though:
2734 */
2735 if (unlikely((long)sum < 0))
2736 sum = 0;
2737
2738 return sum;
2739 }
2740
2741 unsigned long long nr_context_switches(void)
2742 {
2743 int i;
2744 unsigned long long sum = 0;
2745
2746 for_each_possible_cpu(i)
2747 sum += cpu_rq(i)->nr_switches;
2748
2749 return sum;
2750 }
2751
2752 unsigned long nr_iowait(void)
2753 {
2754 unsigned long i, sum = 0;
2755
2756 for_each_possible_cpu(i)
2757 sum += atomic_read(&cpu_rq(i)->nr_iowait);
2758
2759 return sum;
2760 }
2761
2762 unsigned long nr_active(void)
2763 {
2764 unsigned long i, running = 0, uninterruptible = 0;
2765
2766 for_each_online_cpu(i) {
2767 running += cpu_rq(i)->nr_running;
2768 uninterruptible += cpu_rq(i)->nr_uninterruptible;
2769 }
2770
2771 if (unlikely((long)uninterruptible < 0))
2772 uninterruptible = 0;
2773
2774 return running + uninterruptible;
2775 }
2776
2777 /*
2778 * Update rq->cpu_load[] statistics. This function is usually called every
2779 * scheduler tick (TICK_NSEC).
2780 */
2781 static void update_cpu_load(struct rq *this_rq)
2782 {
2783 unsigned long this_load = this_rq->load.weight;
2784 int i, scale;
2785
2786 this_rq->nr_load_updates++;
2787
2788 /* Update our load: */
2789 for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
2790 unsigned long old_load, new_load;
2791
2792 /* scale is effectively 1 << i now, and >> i divides by scale */
2793
2794 old_load = this_rq->cpu_load[i];
2795 new_load = this_load;
2796 /*
2797 * Round up the averaging division if load is increasing. This
2798 * prevents us from getting stuck on 9 if the load is 10, for
2799 * example.
2800 */
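/*
 * Concrete example: for i == 2 (scale == 4), old_load == 9 and
 * new_load == 10, (9*3 + 10) >> 2 would stay at 9 forever; bumping
 * new_load to 13 gives (9*3 + 13) >> 2 == 10.
 */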
2801 if (new_load > old_load)
2802 new_load += scale-1;
2803 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
2804 }
2805 }
2806
2807 #ifdef CONFIG_SMP
2808
2809 /*
2810 * double_rq_lock - safely lock two runqueues
2811 *
2812 * Note this does not disable interrupts like task_rq_lock,
2813 * you need to do so manually before calling.
2814 */
2815 static void double_rq_lock(struct rq *rq1, struct rq *rq2)
2816 __acquires(rq1->lock)
2817 __acquires(rq2->lock)
2818 {
2819 BUG_ON(!irqs_disabled());
2820 if (rq1 == rq2) {
2821 spin_lock(&rq1->lock);
2822 __acquire(rq2->lock); /* Fake it out ;) */
2823 } else {
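/*
 * Always take the lower-addressed lock first, so that two CPUs
 * locking the same pair of runqueues cannot deadlock against
 * each other.
 */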
2824 if (rq1 < rq2) {
2825 spin_lock(&rq1->lock);
2826 spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
2827 } else {
2828 spin_lock(&rq2->lock);
2829 spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
2830 }
2831 }
2832 update_rq_clock(rq1);
2833 update_rq_clock(rq2);
2834 }
2835
2836 /*
2837 * double_rq_unlock - safely unlock two runqueues
2838 *
2839 * Note this does not restore interrupts like task_rq_unlock,
2840 * you need to do so manually after calling.
2841 */
2842 static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2843 __releases(rq1->lock)
2844 __releases(rq2->lock)
2845 {
2846 spin_unlock(&rq1->lock);
2847 if (rq1 != rq2)
2848 spin_unlock(&rq2->lock);
2849 else
2850 __release(rq2->lock);
2851 }
2852
2853 /*
2854 * If dest_cpu is allowed for this process, migrate the task to it.
2855 * This is accomplished by forcing the cpu_allowed mask to only
2856 * allow dest_cpu, which will force the cpu onto dest_cpu. Then
2857 * the cpu_allowed mask is restored.
2858 */
2859 static void sched_migrate_task(struct task_struct *p, int dest_cpu)
2860 {
2861 struct migration_req req;
2862 unsigned long flags;
2863 struct rq *rq;
2864
2865 rq = task_rq_lock(p, &flags);
2866 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
2867 || unlikely(!cpu_active(dest_cpu)))
2868 goto out;
2869
2870 /* force the process onto the specified CPU */
2871 if (migrate_task(p, dest_cpu, &req)) {
2872 /* Need to wait for migration thread (might exit: take ref). */
2873 struct task_struct *mt = rq->migration_thread;
2874
2875 get_task_struct(mt);
2876 task_rq_unlock(rq, &flags);
2877 wake_up_process(mt);
2878 put_task_struct(mt);
2879 wait_for_completion(&req.done);
2880
2881 return;
2882 }
2883 out:
2884 task_rq_unlock(rq, &flags);
2885 }
2886
2887 /*
2888 * sched_exec - execve() is a valuable balancing opportunity, because at
2889 * this point the task has the smallest effective memory and cache footprint.
2890 */
2891 void sched_exec(void)
2892 {
2893 int new_cpu, this_cpu = get_cpu();
2894 new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
2895 put_cpu();
2896 if (new_cpu != this_cpu)
2897 sched_migrate_task(current, new_cpu);
2898 }
2899
2900 /*
2901 * pull_task - move a task from a remote runqueue to the local runqueue.
2902 * Both runqueues must be locked.
2903 */
2904 static void pull_task(struct rq *src_rq, struct task_struct *p,
2905 struct rq *this_rq, int this_cpu)
2906 {
2907 deactivate_task(src_rq, p, 0);
2908 set_task_cpu(p, this_cpu);
2909 activate_task(this_rq, p, 0);
2910 /*
2911 * Note that idle threads have a prio of MAX_PRIO, so this test
2912 * is always true for them.
2913 */
2914 check_preempt_curr(this_rq, p, 0);
2915 }
2916
2917 /*
2918 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
2919 */
2920 static
2921 int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2922 struct sched_domain *sd, enum cpu_idle_type idle,
2923 int *all_pinned)
2924 {
2925 /*
2926 * We do not migrate tasks that are:
2927 * 1) running (obviously), or
2928 * 2) cannot be migrated to this CPU due to cpus_allowed, or
2929 * 3) are cache-hot on their current CPU.
2930 */
2931 if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
2932 schedstat_inc(p, se.nr_failed_migrations_affine);
2933 return 0;
2934 }
2935 *all_pinned = 0;
2936
2937 if (task_running(rq, p)) {
2938 schedstat_inc(p, se.nr_failed_migrations_running);
2939 return 0;
2940 }
2941
2942 /*
2943 * Aggressive migration if:
2944 * 1) task is cache cold, or
2945 * 2) too many balance attempts have failed.
2946 */
2947
2948 if (!task_hot(p, rq->clock, sd) ||
2949 sd->nr_balance_failed > sd->cache_nice_tries) {
2950 #ifdef CONFIG_SCHEDSTATS
2951 if (task_hot(p, rq->clock, sd)) {
2952 schedstat_inc(sd, lb_hot_gained[idle]);
2953 schedstat_inc(p, se.nr_forced_migrations);
2954 }
2955 #endif
2956 return 1;
2957 }
2958
2959 if (task_hot(p, rq->clock, sd)) {
2960 schedstat_inc(p, se.nr_failed_migrations_hot);
2961 return 0;
2962 }
2963 return 1;
2964 }
2965
2966 static unsigned long
2967 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2968 unsigned long max_load_move, struct sched_domain *sd,
2969 enum cpu_idle_type idle, int *all_pinned,
2970 int *this_best_prio, struct rq_iterator *iterator)
2971 {
2972 int loops = 0, pulled = 0, pinned = 0;
2973 struct task_struct *p;
2974 long rem_load_move = max_load_move;
2975
2976 if (max_load_move == 0)
2977 goto out;
2978
2979 pinned = 1;
2980
2981 /*
2982 * Start the load-balancing iterator:
2983 */
2984 p = iterator->start(iterator->arg);
2985 next:
2986 if (!p || loops++ > sysctl_sched_nr_migrate)
2987 goto out;
2988
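/*
 * Skip a task whose weight alone is more than twice the load we
 * still want to move (pulling it would overshoot the requested
 * imbalance), and anything can_migrate_task() vetoes.
 */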
2989 if ((p->se.load.weight >> 1) > rem_load_move ||
2990 !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
2991 p = iterator->next(iterator->arg);
2992 goto next;
2993 }
2994
2995 pull_task(busiest, p, this_rq, this_cpu);
2996 pulled++;
2997 rem_load_move -= p->se.load.weight;
2998
2999 /*
3000 * We only want to steal up to the prescribed amount of weighted load.
3001 */
3002 if (rem_load_move > 0) {
3003 if (p->prio < *this_best_prio)
3004 *this_best_prio = p->prio;
3005 p = iterator->next(iterator->arg);
3006 goto next;
3007 }
3008 out:
3009 /*
3010 * Right now, this is one of only two places pull_task() is called,
3011 * so we can safely collect pull_task() stats here rather than
3012 * inside pull_task().
3013 */
3014 schedstat_add(sd, lb_gained[idle], pulled);
3015
3016 if (all_pinned)
3017 *all_pinned = pinned;
3018
3019 return max_load_move - rem_load_move;
3020 }
3021
3022 /*
3023 * move_tasks tries to move up to max_load_move weighted load from busiest to
3024 * this_rq, as part of a balancing operation within domain "sd".
3025 * Returns 1 if successful and 0 otherwise.
3026 *
3027 * Called with both runqueues locked.
3028 */
3029 static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
3030 unsigned long max_load_move,
3031 struct sched_domain *sd, enum cpu_idle_type idle,
3032 int *all_pinned)
3033 {
3034 const struct sched_class *class = sched_class_highest;
3035 unsigned long total_load_moved = 0;
3036 int this_best_prio = this_rq->curr->prio;
3037
3038 do {
3039 total_load_moved +=
3040 class->load_balance(this_rq, this_cpu, busiest,
3041 max_load_move - total_load_moved,
3042 sd, idle, all_pinned, &this_best_prio);
3043 class = class->next;
3044
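/*
 * A newly idle runqueue only needs one task to run; once something
 * has been pulled there is no point in iterating the remaining
 * classes.
 */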
3045 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
3046 break;
3047
3048 } while (class && max_load_move > total_load_moved);
3049
3050 return total_load_moved > 0;
3051 }
3052
3053 static int
3054 iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
3055 struct sched_domain *sd, enum cpu_idle_type idle,
3056 struct rq_iterator *iterator)
3057 {
3058 struct task_struct *p = iterator->start(iterator->arg);
3059 int pinned = 0;
3060
3061 while (p) {
3062 if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
3063 pull_task(busiest, p, this_rq, this_cpu);
3064 /*
3065 * Right now, this is only the second place pull_task()
3066 * is called, so we can safely collect pull_task()
3067 * stats here rather than inside pull_task().
3068 */
3069 schedstat_inc(sd, lb_gained[idle]);
3070
3071 return 1;
3072 }
3073 p = iterator->next(iterator->arg);
3074 }
3075
3076 return 0;
3077 }
3078
3079 /*
3080 * move_one_task tries to move exactly one task from busiest to this_rq, as
3081 * part of active balancing operations within "domain".
3082 * Returns 1 if successful and 0 otherwise.
3083 *
3084 * Called with both runqueues locked.
3085 */
3086 static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
3087 struct sched_domain *sd, enum cpu_idle_type idle)
3088 {
3089 const struct sched_class *class;
3090
3091 for (class = sched_class_highest; class; class = class->next)
3092 if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
3093 return 1;
3094
3095 return 0;
3096 }
3097
3098 /*
3099 * find_busiest_group finds and returns the busiest CPU group within the
3100 * domain. It calculates and returns the amount of weighted load which
3101 * should be moved to restore balance via the imbalance parameter.
3102 */
3103 static struct sched_group *
3104 find_busiest_group(struct sched_domain *sd, int this_cpu,
3105 unsigned long *imbalance, enum cpu_idle_type idle,
3106 int *sd_idle, const struct cpumask *cpus, int *balance)
3107 {
3108 struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
3109 unsigned long max_load, avg_load, total_load, this_load, total_pwr;
3110 unsigned long max_pull;
3111 unsigned long busiest_load_per_task, busiest_nr_running;
3112 unsigned long this_load_per_task, this_nr_running;
3113 int load_idx, group_imb = 0;
3114 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3115 int power_savings_balance = 1;
3116 unsigned long leader_nr_running = 0, min_load_per_task = 0;
3117 unsigned long min_nr_running = ULONG_MAX;
3118 struct sched_group *group_min = NULL, *group_leader = NULL;
3119 #endif
3120
3121 max_load = this_load = total_load = total_pwr = 0;
3122 busiest_load_per_task = busiest_nr_running = 0;
3123 this_load_per_task = this_nr_running = 0;
3124
3125 if (idle == CPU_NOT_IDLE)
3126 load_idx = sd->busy_idx;
3127 else if (idle == CPU_NEWLY_IDLE)
3128 load_idx = sd->newidle_idx;
3129 else
3130 load_idx = sd->idle_idx;
3131
3132 do {
3133 unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
3134 int local_group;
3135 int i;
3136 int __group_imb = 0;
3137 unsigned int balance_cpu = -1, first_idle_cpu = 0;
3138 unsigned long sum_nr_running, sum_weighted_load;
3139 unsigned long sum_avg_load_per_task;
3140 unsigned long avg_load_per_task;
3141
3142 local_group = cpumask_test_cpu(this_cpu,
3143 sched_group_cpus(group));
3144
3145 if (local_group)
3146 balance_cpu = cpumask_first(sched_group_cpus(group));
3147
3148 /* Tally up the load of all CPUs in the group */
3149 sum_weighted_load = sum_nr_running = avg_load = 0;
3150 sum_avg_load_per_task = avg_load_per_task = 0;
3151
3152 max_cpu_load = 0;
3153 min_cpu_load = ~0UL;
3154
3155 for_each_cpu_and(i, sched_group_cpus(group), cpus) {
3156 struct rq *rq = cpu_rq(i);
3157
3158 if (*sd_idle && rq->nr_running)
3159 *sd_idle = 0;
3160
3161 /* Bias balancing toward cpus of our domain */
3162 if (local_group) {
3163 if (idle_cpu(i) && !first_idle_cpu) {
3164 first_idle_cpu = 1;
3165 balance_cpu = i;
3166 }
3167
3168 load = target_load(i, load_idx);
3169 } else {
3170 load = source_load(i, load_idx);
3171 if (load > max_cpu_load)
3172 max_cpu_load = load;
3173 if (min_cpu_load > load)
3174 min_cpu_load = load;
3175 }
3176
3177 avg_load += load;
3178 sum_nr_running += rq->nr_running;
3179 sum_weighted_load += weighted_cpuload(i);
3180
3181 sum_avg_load_per_task += cpu_avg_load_per_task(i);
3182 }
3183
3184 /*
3185 * The first idle cpu or the first cpu (busiest) in this sched group
3186 * is eligible for doing load balancing at this domain and those
3187 * above it. In the newly idle case, we allow all the cpus
3188 * to do the newly idle load balance.
3189 */
3190 if (idle != CPU_NEWLY_IDLE && local_group &&
3191 balance_cpu != this_cpu && balance) {
3192 *balance = 0;
3193 goto ret;
3194 }
3195
3196 total_load += avg_load;
3197 total_pwr += group->__cpu_power;
3198
3199 /* Adjust by relative CPU power of the group */
3200 avg_load = sg_div_cpu_power(group,
3201 avg_load * SCHED_LOAD_SCALE);
3202
3203
3204 /*
3205 * Consider the group unbalanced when the imbalance is larger
3206 * than the average weight of two tasks.
3207 *
3208 * APZ: with cgroup the avg task weight can vary wildly and
3209 * might not be a suitable number - should we keep a
3210 * normalized nr_running number somewhere that negates
3211 * the hierarchy?
3212 */
3213 avg_load_per_task = sg_div_cpu_power(group,
3214 sum_avg_load_per_task * SCHED_LOAD_SCALE);
3215
3216 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
3217 __group_imb = 1;
3218
3219 group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
3220
3221 if (local_group) {
3222 this_load = avg_load;
3223 this = group;
3224 this_nr_running = sum_nr_running;
3225 this_load_per_task = sum_weighted_load;
3226 } else if (avg_load > max_load &&
3227 (sum_nr_running > group_capacity || __group_imb)) {
3228 max_load = avg_load;
3229 busiest = group;
3230 busiest_nr_running = sum_nr_running;
3231 busiest_load_per_task = sum_weighted_load;
3232 group_imb = __group_imb;
3233 }
3234
3235 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3236 /*
3237 * Busy processors will not participate in power savings
3238 * balance.
3239 */
3240 if (idle == CPU_NOT_IDLE ||
3241 !(sd->flags & SD_POWERSAVINGS_BALANCE))
3242 goto group_next;
3243
3244 /*
3245 * If the local group is idle or completely loaded
3246 * no need to do power savings balance at this domain
3247 */
3248 if (local_group && (this_nr_running >= group_capacity ||
3249 !this_nr_running))
3250 power_savings_balance = 0;
3251
3252 /*
3253 * If a group is already running at full capacity or idle,
3254 * don't include that group in power savings calculations
3255 */
3256 if (!power_savings_balance || sum_nr_running >= group_capacity
3257 || !sum_nr_running)
3258 goto group_next;
3259
3260 /*
3261 * Calculate the group which has the least non-idle load.
3262 * This is the group from where we need to pick up the load
3263 * for saving power
3264 */
3265 if ((sum_nr_running < min_nr_running) ||
3266 (sum_nr_running == min_nr_running &&
3267 cpumask_first(sched_group_cpus(group)) >
3268 cpumask_first(sched_group_cpus(group_min)))) {
3269 group_min = group;
3270 min_nr_running = sum_nr_running;
3271 min_load_per_task = sum_weighted_load /
3272 sum_nr_running;
3273 }
3274
3275 /*
3276 * Calculate the group which is almost near its
3277 * capacity but still has some space to pick up some load
3278 * from other group and save more power
3279 */
3280 if (sum_nr_running <= group_capacity - 1) {
3281 if (sum_nr_running > leader_nr_running ||
3282 (sum_nr_running == leader_nr_running &&
3283 cpumask_first(sched_group_cpus(group)) <
3284 cpumask_first(sched_group_cpus(group_leader)))) {
3285 group_leader = group;
3286 leader_nr_running = sum_nr_running;
3287 }
3288 }
3289 group_next:
3290 #endif
3291 group = group->next;
3292 } while (group != sd->groups);
3293
3294 if (!busiest || this_load >= max_load || busiest_nr_running == 0)
3295 goto out_balanced;
3296
3297 avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
3298
3299 if (this_load >= avg_load ||
3300 100*max_load <= sd->imbalance_pct*this_load)
3301 goto out_balanced;
3302
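/*
 * busiest_load_per_task becomes the average weighted load of one
 * runnable task in the busiest group; if that group is internally
 * imbalanced, cap it at the domain-wide average load so a single
 * oversized task does not inflate the amount we try to move.
 */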
3303 busiest_load_per_task /= busiest_nr_running;
3304 if (group_imb)
3305 busiest_load_per_task = min(busiest_load_per_task, avg_load);
3306
3307 /*
3308 * We're trying to get all the cpus to the average_load, so we don't
3309 * want to push ourselves above the average load, nor do we wish to
3310 * reduce the max loaded cpu below the average load, as either of these
3311 * actions would just result in more rebalancing later, and ping-pong
3312 * tasks around. Thus we look for the minimum possible imbalance.
3313 * Negative imbalances (*we* are more loaded than anyone else) will
3314 * be counted as no imbalance for these purposes -- we can't fix that
3315 * by pulling tasks to us. Be careful of negative numbers as they'll
3316 * appear as very large values with unsigned longs.
3317 */
3318 if (max_load <= busiest_load_per_task)
3319 goto out_balanced;
3320
3321 /*
3322 * In the presence of smp nice balancing, certain scenarios can have
3323 * max load less than avg load (as we skip the groups at or below
3324 * their cpu_power while calculating max_load).
3325 */
3326 if (max_load < avg_load) {
3327 *imbalance = 0;
3328 goto small_imbalance;
3329 }
3330
3331 /* Don't want to pull so many tasks that a group would go idle */
3332 max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
3333
3334 /* How much load to actually move to equalise the imbalance */
3335 *imbalance = min(max_pull * busiest->__cpu_power,
3336 (avg_load - this_load) * this->__cpu_power)
3337 / SCHED_LOAD_SCALE;
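/*
 * i.e. move the smaller of "what the busiest group can shed without
 * dropping below the average" and "what the local group can absorb
 * without rising above it", each scaled by that group's cpu power.
 */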
3338
3339 /*
3340 * if *imbalance is less than the average load per runnable task
3341 * there is no guarantee that any tasks will be moved, so we'll have
3342 * a think about bumping its value to force at least one task to be
3343 * moved
3344 */
3345 if (*imbalance < busiest_load_per_task) {
3346 unsigned long tmp, pwr_now, pwr_move;
3347 unsigned int imbn;
3348
3349 small_imbalance:
3350 pwr_move = pwr_now = 0;
3351 imbn = 2;
3352 if (this_nr_running) {
3353 this_load_per_task /= this_nr_running;
3354 if (busiest_load_per_task > this_load_per_task)
3355 imbn = 1;
3356 } else
3357 this_load_per_task = cpu_avg_load_per_task(this_cpu);
3358
3359 if (max_load - this_load + busiest_load_per_task >=
3360 busiest_load_per_task * imbn) {
3361 *imbalance = busiest_load_per_task;
3362 return busiest;
3363 }
3364
3365 /*
3366 * OK, we don't have enough imbalance to justify moving tasks,
3367 * however we may be able to increase total CPU power used by
3368 * moving them.
3369 */
3370
3371 pwr_now += busiest->__cpu_power *
3372 min(busiest_load_per_task, max_load);
3373 pwr_now += this->__cpu_power *
3374 min(this_load_per_task, this_load);
3375 pwr_now /= SCHED_LOAD_SCALE;
3376
3377 /* Amount of load we'd subtract */
3378 tmp = sg_div_cpu_power(busiest,
3379 busiest_load_per_task * SCHED_LOAD_SCALE);
3380 if (max_load > tmp)
3381 pwr_move += busiest->__cpu_power *
3382 min(busiest_load_per_task, max_load - tmp);
3383
3384 /* Amount of load we'd add */
3385 if (max_load * busiest->__cpu_power <
3386 busiest_load_per_task * SCHED_LOAD_SCALE)
3387 tmp = sg_div_cpu_power(this,
3388 max_load * busiest->__cpu_power);
3389 else
3390 tmp = sg_div_cpu_power(this,
3391 busiest_load_per_task * SCHED_LOAD_SCALE);
3392 pwr_move += this->__cpu_power *
3393 min(this_load_per_task, this_load + tmp);
3394 pwr_move /= SCHED_LOAD_SCALE;
3395
3396 /* Move if we gain throughput */
3397 if (pwr_move > pwr_now)
3398 *imbalance = busiest_load_per_task;
3399 }
3400
3401 return busiest;
3402
3403 out_balanced:
3404 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3405 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
3406 goto ret;
3407
3408 if (this == group_leader && group_leader != group_min) {
3409 *imbalance = min_load_per_task;
3410 if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
3411 cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
3412 cpumask_first(sched_group_cpus(group_leader));
3413 }
3414 return group_min;
3415 }
3416 #endif
3417 ret:
3418 *imbalance = 0;
3419 return NULL;
3420 }
3421
3422 /*
3423 * find_busiest_queue - find the busiest runqueue among the cpus in group.
3424 */
3425 static struct rq *
3426 find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
3427 unsigned long imbalance, const struct cpumask *cpus)
3428 {
3429 struct rq *busiest = NULL, *rq;
3430 unsigned long max_load = 0;
3431 int i;
3432
3433 for_each_cpu(i, sched_group_cpus(group)) {
3434 unsigned long wl;
3435
3436 if (!cpumask_test_cpu(i, cpus))
3437 continue;
3438
3439 rq = cpu_rq(i);
3440 wl = weighted_cpuload(i);
3441
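/*
 * A runqueue whose single task already outweighs the requested
 * imbalance cannot help: moving that one task would overshoot and
 * there is nothing smaller to take.
 */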
3442 if (rq->nr_running == 1 && wl > imbalance)
3443 continue;
3444
3445 if (wl > max_load) {
3446 max_load = wl;
3447 busiest = rq;
3448 }
3449 }
3450
3451 return busiest;
3452 }
3453
3454 /*
3455 * Max backoff if we encounter pinned tasks. The exact value is pretty
3456 * arbitrary, so long as it is large enough.
3457 */
3458 #define MAX_PINNED_INTERVAL 512
3459
3460 /*
3461 * Check this_cpu to ensure it is balanced within domain. Attempt to move
3462 * tasks if there is an imbalance.
3463 */
3464 static int load_balance(int this_cpu, struct rq *this_rq,
3465 struct sched_domain *sd, enum cpu_idle_type idle,
3466 int *balance, struct cpumask *cpus)
3467 {
3468 int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
3469 struct sched_group *group;
3470 unsigned long imbalance;
3471 struct rq *busiest;
3472 unsigned long flags;
3473
3474 cpumask_setall(cpus);
3475
3476 /*
3477 * When power savings policy is enabled for the parent domain, idle
3478 * sibling can pick up load irrespective of busy siblings. In this case,
3479 * let the state of idle sibling percolate up as CPU_IDLE, instead of
3480 * portraying it as CPU_NOT_IDLE.
3481 */
3482 if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
3483 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3484 sd_idle = 1;
3485
3486 schedstat_inc(sd, lb_count[idle]);
3487
3488 redo:
3489 update_shares(sd);
3490 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
3491 cpus, balance);
3492
3493 if (*balance == 0)
3494 goto out_balanced;
3495
3496 if (!group) {
3497 schedstat_inc(sd, lb_nobusyg[idle]);
3498 goto out_balanced;
3499 }
3500
3501 busiest = find_busiest_queue(group, idle, imbalance, cpus);
3502 if (!busiest) {
3503 schedstat_inc(sd, lb_nobusyq[idle]);
3504 goto out_balanced;
3505 }
3506
3507 BUG_ON(busiest == this_rq);
3508
3509 schedstat_add(sd, lb_imbalance[idle], imbalance);
3510
3511 ld_moved = 0;
3512 if (busiest->nr_running > 1) {
3513 /*
3514 * Attempt to move tasks. If find_busiest_group has found
3515 * an imbalance but busiest->nr_running <= 1, the group is
3516 * still unbalanced. ld_moved simply stays zero, so it is
3517 * correctly treated as an imbalance.
3518 */
3519 local_irq_save(flags);
3520 double_rq_lock(this_rq, busiest);
3521 ld_moved = move_tasks(this_rq, this_cpu, busiest,
3522 imbalance, sd, idle, &all_pinned);
3523 double_rq_unlock(this_rq, busiest);
3524 local_irq_restore(flags);
3525
3526 /*
3527 * some other cpu did the load balance for us.
3528 */
3529 if (ld_moved && this_cpu != smp_processor_id())
3530 resched_cpu(this_cpu);
3531
3532 /* All tasks on this runqueue were pinned by CPU affinity */
3533 if (unlikely(all_pinned)) {
3534 cpumask_clear_cpu(cpu_of(busiest), cpus);
3535 if (!cpumask_empty(cpus))
3536 goto redo;
3537 goto out_balanced;
3538 }
3539 }
3540
3541 if (!ld_moved) {
3542 schedstat_inc(sd, lb_failed[idle]);
3543 sd->nr_balance_failed++;
3544
3545 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
3546
3547 spin_lock_irqsave(&busiest->lock, flags);
3548
3549 /* don't kick the migration_thread if the curr
3550 * task on busiest cpu can't be moved to this_cpu
3551 */
3552 if (!cpumask_test_cpu(this_cpu,
3553 &busiest->curr->cpus_allowed)) {
3554 spin_unlock_irqrestore(&busiest->lock, flags);
3555 all_pinned = 1;
3556 goto out_one_pinned;
3557 }
3558
3559 if (!busiest->active_balance) {
3560 busiest->active_balance = 1;
3561 busiest->push_cpu = this_cpu;
3562 active_balance = 1;
3563 }
3564 spin_unlock_irqrestore(&busiest->lock, flags);
3565 if (active_balance)
3566 wake_up_process(busiest->migration_thread);
3567
3568 /*
3569 * We've kicked active balancing, reset the failure
3570 * counter.
3571 */
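/*
 * cache_nice_tries+1 sits just below the cache_nice_tries+2 trigger
 * above, so regular balancing keeps the aggressive-migration bias in
 * can_migrate_task() without immediately kicking active balancing
 * again.
 */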
3572 sd->nr_balance_failed = sd->cache_nice_tries+1;
3573 }
3574 } else
3575 sd->nr_balance_failed = 0;
3576
3577 if (likely(!active_balance)) {
3578 /* We were unbalanced, so reset the balancing interval */
3579 sd->balance_interval = sd->min_interval;
3580 } else {
3581 /*
3582 * If we've begun active balancing, start to back off. This
3583 * case may not be covered by the all_pinned logic if there
3584 * is only 1 task on the busy runqueue (because we don't call
3585 * move_tasks).
3586 */
3587 if (sd->balance_interval < sd->max_interval)
3588 sd->balance_interval *= 2;
3589 }
3590
3591 if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3592 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3593 ld_moved = -1;
3594
3595 goto out;
3596
3597 out_balanced:
3598 schedstat_inc(sd, lb_balanced[idle]);
3599
3600 sd->nr_balance_failed = 0;
3601
3602 out_one_pinned:
3603 /* tune up the balancing interval */
3604 if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
3605 (sd->balance_interval < sd->max_interval))
3606 sd->balance_interval *= 2;
3607
3608 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3609 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3610 ld_moved = -1;
3611 else
3612 ld_moved = 0;
3613 out:
3614 if (ld_moved)
3615 update_shares(sd);
3616 return ld_moved;
3617 }
3618
3619 /*
3620 * Check this_cpu to ensure it is balanced within domain. Attempt to move
3621 * tasks if there is an imbalance.
3622 *
3623 * Called from schedule when this_rq is about to become idle (CPU_NEWLY_IDLE).
3624 * this_rq is locked.
3625 */
3626 static int
3627 load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
3628 struct cpumask *cpus)
3629 {
3630 struct sched_group *group;
3631 struct rq *busiest = NULL;
3632 unsigned long imbalance;
3633 int ld_moved = 0;
3634 int sd_idle = 0;
3635 int all_pinned = 0;
3636
3637 cpumask_setall(cpus);
3638
3639 /*
3640 * When power savings policy is enabled for the parent domain, idle
3641 * sibling can pick up load irrespective of busy siblings. In this case,
3642 * let the state of idle sibling percolate up as IDLE, instead of
3643 * portraying it as CPU_NOT_IDLE.
3644 */
3645 if (sd->flags & SD_SHARE_CPUPOWER &&
3646 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3647 sd_idle = 1;
3648
3649 schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
3650 redo:
3651 update_shares_locked(this_rq, sd);
3652 group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
3653 &sd_idle, cpus, NULL);
3654 if (!group) {
3655 schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
3656 goto out_balanced;
3657 }
3658
3659 busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance, cpus);
3660 if (!busiest) {
3661 schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
3662 goto out_balanced;
3663 }
3664
3665 BUG_ON(busiest == this_rq);
3666
3667 schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance);
3668
3669 ld_moved = 0;
3670 if (busiest->nr_running > 1) {
3671 /* Attempt to move tasks */
3672 double_lock_balance(this_rq, busiest);
3673 /* this_rq->clock is already updated */
3674 update_rq_clock(busiest);
3675 ld_moved = move_tasks(this_rq, this_cpu, busiest,
3676 imbalance, sd, CPU_NEWLY_IDLE,
3677 &all_pinned);
3678 double_unlock_balance(this_rq, busiest);
3679
3680 if (unlikely(all_pinned)) {
3681 cpumask_clear_cpu(cpu_of(busiest), cpus);
3682 if (!cpumask_empty(cpus))
3683 goto redo;
3684 }
3685 }
3686
3687 if (!ld_moved) {
3688 int active_balance = 0;
3689
3690 schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
3691 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3692 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3693 return -1;
3694
3695 if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
3696 return -1;
3697
3698 if (sd->nr_balance_failed++ < 2)
3699 return -1;
3700
3701 /*
3702 * The only task running on a non-idle cpu can be moved to this
3703 * cpu in an attempt to completely free up the other CPU
3704 * package. The same method used to move tasks in load_balance()
3705 * has been extended for load_balance_newidle() to speed up
3706 * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2)
3707 *
3708 * The package power saving logic comes from
3709 * find_busiest_group(). If there is no imbalance, then
3710 * f_b_g() will return NULL. However when sched_mc={1,2} then
3711 * f_b_g() will select a group from which a running task may be
3712 * pulled to this cpu in order to make the other package idle.
3713 * If there is no opportunity to make a package idle and if
3714 * there is no imbalance, then f_b_g() will return NULL and no
3715 * action will be taken in load_balance_newidle().
3716 *
3717 * Under normal task pull operation due to imbalance, there
3718 * will be more than one task in the source run queue and
3719 * move_tasks() will succeed. ld_moved will be true and this
3720 * active balance code will not be triggered.
3721 */
3722
3723 /* Lock busiest in correct order while this_rq is held */
3724 double_lock_balance(this_rq, busiest);
3725
3726 /*
3727 * don't kick the migration_thread if the curr
3728 * task on busiest cpu can't be moved to this_cpu
3729 */
3730 if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
3731 double_unlock_balance(this_rq, busiest);
3732 all_pinned = 1;
3733 return ld_moved;
3734 }
3735
3736 if (!busiest->active_balance) {
3737 busiest->active_balance = 1;
3738 busiest->push_cpu = this_cpu;
3739 active_balance = 1;
3740 }
3741
3742 double_unlock_balance(this_rq, busiest);
3743 /*
3744 * Should not call ttwu while holding a rq->lock
3745 */
3746 spin_unlock(&this_rq->lock);
3747 if (active_balance)
3748 wake_up_process(busiest->migration_thread);
3749 spin_lock(&this_rq->lock);
3750
3751 } else
3752 sd->nr_balance_failed = 0;
3753
3754 update_shares_locked(this_rq, sd);
3755 return ld_moved;
3756
3757 out_balanced:
3758 schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]);
3759 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3760 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3761 return -1;
3762 sd->nr_balance_failed = 0;
3763
3764 return 0;
3765 }
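/*
 * Return convention of load_balance_newidle(), as implemented above:
 * the number of tasks pulled, 0 when the domain was already balanced,
 * or -1 in the SD_SHARE_CPUPOWER/power-savings cases handled above;
 * idle_balance() below treats any non-zero value as "stop scanning".
 */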
3766
3767 /*
3768 * idle_balance is called by schedule() if this_cpu is about to become
3769 * idle. Attempts to pull tasks from other CPUs.
3770 */
3771 static void idle_balance(int this_cpu, struct rq *this_rq)
3772 {
3773 struct sched_domain *sd;
3774 int pulled_task = 0;
3775 unsigned long next_balance = jiffies + HZ;
3776 cpumask_var_t tmpmask;
3777
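/*
 * idle_balance() is invoked from schedule() with this_rq->lock held and
 * interrupts disabled, hence the atomic (non-sleeping) allocation below.
 */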
3778 if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
3779 return;
3780
3781 for_each_domain(this_cpu, sd) {
3782 unsigned long interval;
3783
3784 if (!(sd->flags & SD_LOAD_BALANCE))
3785 continue;
3786
3787 if (sd->flags & SD_BALANCE_NEWIDLE)
3788 /* If we've pulled tasks over, stop searching: */
3789 pulled_task = load_balance_newidle(this_cpu, this_rq,
3790 sd, tmpmask);
3791
3792 interval = msecs_to_jiffies(sd->balance_interval);
3793 if (time_after(next_balance, sd->last_balance + interval))
3794 next_balance = sd->last_balance + interval;
3795 if (pulled_task)
3796 break;
3797 }
3798 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
3799 /*
3800 * We are going idle. next_balance may be set based on
3801 * a busy processor. So reset next_balance.
3802 */
3803 this_rq->next_balance = next_balance;
3804 }
3805 free_cpumask_var(tmpmask);
3806 }
3807
3808 /*
3809 * active_load_balance is run by migration threads. It pushes running tasks
3810 * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
3811 * running on each physical CPU where possible, and avoids physical /
3812 * logical imbalances.
3813 *
3814 * Called with busiest_rq locked.
3815 */
3816 static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
3817 {
3818 int target_cpu = busiest_rq->push_cpu;
3819 struct sched_domain *sd;
3820 struct rq *target_rq;
3821
3822 /* Is there any task to move? */
3823 if (busiest_rq->nr_running <= 1)
3824 return;
3825
3826 target_rq = cpu_rq(target_cpu);
3827
3828 /*
3829 * This condition is "impossible", if it occurs
3830 * we need to fix it. Originally reported by
3831 * Bjorn Helgaas on a 128-cpu setup.
3832 */
3833 BUG_ON(busiest_rq == target_rq);
3834
3835 /* move a task from busiest_rq to target_rq */
3836 double_lock_balance(busiest_rq, target_rq);
3837 update_rq_clock(busiest_rq);
3838 update_rq_clock(target_rq);
3839
3840 /* Search for an sd spanning us and the target CPU. */
3841 for_each_domain(target_cpu, sd) {
3842 if ((sd->flags & SD_LOAD_BALANCE) &&
3843 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
3844 break;
3845 }
3846
3847 if (likely(sd)) {
3848 schedstat_inc(sd, alb_count);
3849
3850 if (move_one_task(target_rq, target_cpu, busiest_rq,
3851 sd, CPU_IDLE))
3852 schedstat_inc(sd, alb_pushed);
3853 else
3854 schedstat_inc(sd, alb_failed);
3855 }
3856 double_unlock_balance(busiest_rq, target_rq);
3857 }
3858
3859 #ifdef CONFIG_NO_HZ
3860 static struct {
3861 atomic_t load_balancer;
3862 cpumask_var_t cpu_mask;
3863 } nohz ____cacheline_aligned = {
3864 .load_balancer = ATOMIC_INIT(-1),
3865 };
3866
3867 /*
3868 * This routine will try to nominate the ilb (idle load balancing)
3869 * owner among the cpus whose ticks are stopped. ilb owner will do the idle
3870 * load balancing on behalf of all those cpus. If all the cpus in the system
3871 * go into this tickless mode, then there will be no ilb owner (as there is
3872 * no need for one) and all the cpus will sleep till the next wakeup event
3873 * arrives...
3874 *
3875 * For the ilb owner, the tick is not stopped, and this tick will be used
3876 * for idle load balancing. The ilb owner will still be part of
3877 * nohz.cpu_mask.
3878 *
3879 * While stopping the tick, this cpu will become the ilb owner if there
3880 * is no other owner, and will remain the owner until it becomes busy
3881 * or until all cpus in the system stop their ticks, at which point
3882 * there is no need for an ilb owner.
3883 *
3884 * When the ilb owner becomes busy, it nominates another owner, during the
3885 * next busy scheduler_tick()
3886 */
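/*
 * Return convention, as implemented below: 1 when this cpu is (or has just
 * become) the ilb owner, 0 otherwise.
 */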
3887 int select_nohz_load_balancer(int stop_tick)
3888 {
3889 int cpu = smp_processor_id();
3890
3891 if (stop_tick) {
3892 cpu_rq(cpu)->in_nohz_recently = 1;
3893
3894 if (!cpu_active(cpu)) {
3895 if (atomic_read(&nohz.load_balancer) != cpu)
3896 return 0;
3897
3898 /*
3899 * If we are going offline and still the leader,
3900 * give up!
3901 */
3902 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
3903 BUG();
3904
3905 return 0;
3906 }
3907
3908 cpumask_set_cpu(cpu, nohz.cpu_mask);
3909
3910 /* time for ilb owner also to sleep */
3911 if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
3912 if (atomic_read(&nohz.load_balancer) == cpu)
3913 atomic_set(&nohz.load_balancer, -1);
3914 return 0;
3915 }
3916
3917 if (atomic_read(&nohz.load_balancer) == -1) {
3918 /* make me the ilb owner */
3919 if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
3920 return 1;
3921 } else if (atomic_read(&nohz.load_balancer) == cpu)
3922 return 1;
3923 } else {
3924 if (!cpumask_test_cpu(cpu, nohz.cpu_mask))
3925 return 0;
3926
3927 cpumask_clear_cpu(cpu, nohz.cpu_mask);
3928
3929 if (atomic_read(&nohz.load_balancer) == cpu)
3930 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
3931 BUG();
3932 }
3933 return 0;
3934 }
3935 #endif
3936
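/*
 * 'balancing' serializes load balancing of SD_SERIALIZE domains;
 * rebalance_domains() only trylocks it, so a contended balancing pass is
 * skipped rather than waited for.
 */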
3937 static DEFINE_SPINLOCK(balancing);
3938
3939 /*
3940 * It checks each scheduling domain to see if it is due to be balanced,
3941 * and initiates a balancing operation if so.
3942 *
3943 * Balancing parameters are set up in arch_init_sched_domains.
3944 */
3945 static void rebalance_domains(int cpu, enum cpu_idle_type idle)
3946 {
3947 int balance = 1;
3948 struct rq *rq = cpu_rq(cpu);
3949 unsigned long interval;
3950 struct sched_domain *sd;
3951 /* Earliest time when we have to do rebalance again */
3952 unsigned long next_balance = jiffies + 60*HZ;
3953 int update_next_balance = 0;
3954 int need_serialize;
3955 cpumask_var_t tmp;
3956
3957 /* Fails alloc? Rebalancing probably not a priority right now. */
3958 if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
3959 return;
3960
3961 for_each_domain(cpu, sd) {
3962 if (!(sd->flags & SD_LOAD_BALANCE))
3963 continue;
3964
3965 interval = sd->balance_interval;
3966 if (idle != CPU_IDLE)
3967 interval *= sd->busy_factor;
3968
3969 /* scale ms to jiffies */
3970 interval = msecs_to_jiffies(interval);
3971 if (unlikely(!interval))
3972 interval = 1;
3973 if (interval > HZ*NR_CPUS/10)
3974 interval = HZ*NR_CPUS/10;
3975
3976 need_serialize = sd->flags & SD_SERIALIZE;
3977
3978 if (need_serialize) {
3979 if (!spin_trylock(&balancing))
3980 goto out;
3981 }
3982
3983 if (time_after_eq(jiffies, sd->last_balance + interval)) {
3984 if (load_balance(cpu, rq, sd, idle, &balance, tmp)) {
3985 /*
3986 * We've pulled tasks over so either we're no
3987 * longer idle, or one of our SMT siblings is
3988 * not idle.
3989 */
3990 idle = CPU_NOT_IDLE;
3991 }
3992 sd->last_balance = jiffies;
3993 }
3994 if (need_serialize)
3995 spin_unlock(&balancing);
3996 out:
3997 if (time_after(next_balance, sd->last_balance + interval)) {
3998 next_balance = sd->last_balance + interval;
3999 update_next_balance = 1;
4000 }
4001
4002 /*
4003 * Stop the load balance at this level. There is another
4004 * CPU in our sched group which is doing load balancing more
4005 * actively.
4006 */
4007 if (!balance)
4008 break;
4009 }
4010
4011 /*
4012 * next_balance will be updated only when there is a need.
4013 * When the cpu is attached to the null domain, for example, it will not be
4014 * updated.
4015 */
4016 if (likely(update_next_balance))
4017 rq->next_balance = next_balance;
4018
4019 free_cpumask_var(tmp);
4020 }
4021
4022 /*
4023 * run_rebalance_domains is triggered when needed from the scheduler tick.
4024 * In CONFIG_NO_HZ case, the idle load balance owner will do the
4025 * rebalancing for all the cpus for whom scheduler ticks are stopped.
4026 */
4027 static void run_rebalance_domains(struct softirq_action *h)
4028 {
4029 int this_cpu = smp_processor_id();
4030 struct rq *this_rq = cpu_rq(this_cpu);
4031 enum cpu_idle_type idle = this_rq->idle_at_tick ?
4032 CPU_IDLE : CPU_NOT_IDLE;
4033
4034 rebalance_domains(this_cpu, idle);
4035
4036 #ifdef CONFIG_NO_HZ
4037 /*
4038 * If this cpu is the owner for idle load balancing, then do the
4039 * balancing on behalf of the other idle cpus whose ticks are
4040 * stopped.
4041 */
4042 if (this_rq->idle_at_tick &&
4043 atomic_read(&nohz.load_balancer) == this_cpu) {
4044 struct rq *rq;
4045 int balance_cpu;
4046
4047 for_each_cpu(balance_cpu, nohz.cpu_mask) {
4048 if (balance_cpu == this_cpu)
4049 continue;
4050
4051 /*
4052 * If this cpu gets work to do, stop the load balancing
4053 * work being done for other cpus. Next load
4054 * balancing owner will pick it up.
4055 */
4056 if (need_resched())
4057 break;
4058
4059 rebalance_domains(balance_cpu, CPU_IDLE);
4060
4061 rq = cpu_rq(balance_cpu);
4062 if (time_after(this_rq->next_balance, rq->next_balance))
4063 this_rq->next_balance = rq->next_balance;
4064 }
4065 }
4066 #endif
4067 }
4068
4069 /*
4070 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
4071 *
4072 * In case of CONFIG_NO_HZ, this is the place where we nominate a new
4073 * idle load balancing owner or decide to stop the periodic load balancing,
4074 * if the whole system is idle.
4075 */
4076 static inline void trigger_load_balance(struct rq *rq, int cpu)
4077 {
4078 #ifdef CONFIG_NO_HZ
4079 /*
4080 * If we were in the nohz mode recently and busy at the current
4081 * scheduler tick, then check if we need to nominate new idle
4082 * load balancer.
4083 */
4084 if (rq->in_nohz_recently && !rq->idle_at_tick) {
4085 rq->in_nohz_recently = 0;
4086
4087 if (atomic_read(&nohz.load_balancer) == cpu) {
4088 cpumask_clear_cpu(cpu, nohz.cpu_mask);
4089 atomic_set(&nohz.load_balancer, -1);
4090 }
4091
4092 if (atomic_read(&nohz.load_balancer) == -1) {
4093 /*
4094 * simple selection for now: Nominate the
4095 * first cpu in the nohz list to be the next
4096 * ilb owner.
4097 *
4098 * TBD: Traverse the sched domains and nominate
4099 * the nearest cpu in the nohz.cpu_mask.
4100 */
4101 int ilb = cpumask_first(nohz.cpu_mask);
4102
4103 if (ilb < nr_cpu_ids)
4104 resched_cpu(ilb);
4105 }
4106 }
4107
4108 /*
4109 * If this cpu is idle and doing idle load balancing for all the
4110 * cpus with ticks stopped, is it time for that to stop?
4111 */
4112 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
4113 cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
4114 resched_cpu(cpu);
4115 return;
4116 }
4117
4118 /*
4119 * If this cpu is idle and the idle load balancing is done by
4120 * someone else, then there is no need to raise the SCHED_SOFTIRQ
4121 */
4122 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
4123 cpumask_test_cpu(cpu, nohz.cpu_mask))
4124 return;
4125 #endif
4126 if (time_after_eq(jiffies, rq->next_balance))
4127 raise_softirq(SCHED_SOFTIRQ);
4128 }
4129
4130 #else /* CONFIG_SMP */
4131
4132 /*
4133 * on UP we do not need to balance between CPUs:
4134 */
4135 static inline void idle_balance(int cpu, struct rq *rq)
4136 {
4137 }
4138
4139 #endif
4140
4141 DEFINE_PER_CPU(struct kernel_stat, kstat);
4142
4143 EXPORT_PER_CPU_SYMBOL(kstat);
4144
4145 /*
4146 * Return any ns on the sched_clock that have not yet been accounted in
4147 * @p in case that task is currently running.
4148 *
4149 * Called with task_rq_lock() held on @rq.
4150 */
4151 static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
4152 {
4153 u64 ns = 0;
4154
4155 if (task_current(rq, p)) {
4156 update_rq_clock(rq);
4157 ns = rq->clock - p->se.exec_start;
4158 if ((s64)ns < 0)
4159 ns = 0;
4160 }
4161
4162 return ns;
4163 }
4164
4165 unsigned long long task_delta_exec(struct task_struct *p)
4166 {
4167 unsigned long flags;
4168 struct rq *rq;
4169 u64 ns = 0;
4170
4171 rq = task_rq_lock(p, &flags);
4172 ns = do_task_delta_exec(p, rq);
4173 task_rq_unlock(rq, &flags);
4174
4175 return ns;
4176 }
4177
4178 /*
4179 * Return accounted runtime for the task.
4180 * In case the task is currently running, return the runtime plus current's
4181 * pending runtime that has not been accounted yet.
4182 */
4183 unsigned long long task_sched_runtime(struct task_struct *p)
4184 {
4185 unsigned long flags;
4186 struct rq *rq;
4187 u64 ns = 0;
4188
4189 rq = task_rq_lock(p, &flags);
4190 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
4191 task_rq_unlock(rq, &flags);
4192
4193 return ns;
4194 }
4195
4196 /*
4197 * Return sum_exec_runtime for the thread group.
4198 * In case the task is currently running, return the sum plus current's
4199 * pending runtime that has not been accounted yet.
4200 *
4201 * Note that the thread group might have other running tasks as well,
4202 * so the return value does not include pending runtime that other
4203 * running tasks might have.
4204 */
4205 unsigned long long thread_group_sched_runtime(struct task_struct *p)
4206 {
4207 struct task_cputime totals;
4208 unsigned long flags;
4209 struct rq *rq;
4210 u64 ns;
4211
4212 rq = task_rq_lock(p, &flags);
4213 thread_group_cputime(p, &totals);
4214 ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
4215 task_rq_unlock(rq, &flags);
4216
4217 return ns;
4218 }
4219
4220 /*
4221 * Account user cpu time to a process.
4222 * @p: the process that the cpu time gets accounted to
4223 * @cputime: the cpu time spent in user space since the last update
4224 * @cputime_scaled: cputime scaled by cpu frequency
4225 */
4226 void account_user_time(struct task_struct *p, cputime_t cputime,
4227 cputime_t cputime_scaled)
4228 {
4229 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
4230 cputime64_t tmp;
4231
4232 /* Add user time to process. */
4233 p->utime = cputime_add(p->utime, cputime);
4234 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
4235 account_group_user_time(p, cputime);
4236
4237 /* Add user time to cpustat. */
4238 tmp = cputime_to_cputime64(cputime);
4239 if (TASK_NICE(p) > 0)
4240 cpustat->nice = cputime64_add(cpustat->nice, tmp);
4241 else
4242 cpustat->user = cputime64_add(cpustat->user, tmp);
4243 /* Account for user time used */
4244 acct_update_integrals(p);
4245 }
4246
4247 /*
4248 * Account guest cpu time to a process.
4249 * @p: the process that the cpu time gets accounted to
4250 * @cputime: the cpu time spent in virtual machine since the last update
4251 * @cputime_scaled: cputime scaled by cpu frequency
4252 */
4253 static void account_guest_time(struct task_struct *p, cputime_t cputime,
4254 cputime_t cputime_scaled)
4255 {
4256 cputime64_t tmp;
4257 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
4258
4259 tmp = cputime_to_cputime64(cputime);
4260
4261 /* Add guest time to process. */
4262 p->utime = cputime_add(p->utime, cputime);
4263 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
4264 account_group_user_time(p, cputime);
4265 p->gtime = cputime_add(p->gtime, cputime);
4266
4267 /* Add guest time to cpustat. */
4268 cpustat->user = cputime64_add(cpustat->user, tmp);
4269 cpustat->guest = cputime64_add(cpustat->guest, tmp);
4270 }
4271
4272 /*
4273 * Account system cpu time to a process.
4274 * @p: the process that the cpu time gets accounted to
4275 * @hardirq_offset: the offset to subtract from hardirq_count()
4276 * @cputime: the cpu time spent in kernel space since the last update
4277 * @cputime_scaled: cputime scaled by cpu frequency
4278 */
4279 void account_system_time(struct task_struct *p, int hardirq_offset,
4280 cputime_t cputime, cputime_t cputime_scaled)
4281 {
4282 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
4283 cputime64_t tmp;
4284
4285 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
4286 account_guest_time(p, cputime, cputime_scaled);
4287 return;
4288 }
4289
4290 /* Add system time to process. */
4291 p->stime = cputime_add(p->stime, cputime);
4292 p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
4293 account_group_system_time(p, cputime);
4294
4295 /* Add system time to cpustat. */
4296 tmp = cputime_to_cputime64(cputime);
4297 if (hardirq_count() - hardirq_offset)
4298 cpustat->irq = cputime64_add(cpustat->irq, tmp);
4299 else if (softirq_count())
4300 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
4301 else
4302 cpustat->system = cputime64_add(cpustat->system, tmp);
4303
4304 /* Account for system time used */
4305 acct_update_integrals(p);
4306 }
4307
4308 /*
4309 * Account for involuntary wait time.
4310 * @cputime: the cpu time spent in involuntary wait
4311 */
4312 void account_steal_time(cputime_t cputime)
4313 {
4314 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
4315 cputime64_t cputime64 = cputime_to_cputime64(cputime);
4316
4317 cpustat->steal = cputime64_add(cpustat->steal, cputime64);
4318 }
4319
4320 /*
4321 * Account for idle time.
4322 * @cputime: the cpu time spent in idle wait
4323 */
4324 void account_idle_time(cputime_t cputime)
4325 {
4326 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
4327 cputime64_t cputime64 = cputime_to_cputime64(cputime);
4328 struct rq *rq = this_rq();
4329
4330 if (atomic_read(&rq->nr_iowait) > 0)
4331 cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
4332 else
4333 cpustat->idle = cputime64_add(cpustat->idle, cputime64);
4334 }
4335
4336 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
4337
4338 /*
4339 * Account a single tick of cpu time.
4340 * @p: the process that the cpu time gets accounted to
4341 * @user_tick: indicates if the tick is a user or a system tick
4342 */
4343 void account_process_tick(struct task_struct *p, int user_tick)
4344 {
4345 cputime_t one_jiffy = jiffies_to_cputime(1);
4346 cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
4347 struct rq *rq = this_rq();
4348
4349 if (user_tick)
4350 account_user_time(p, one_jiffy, one_jiffy_scaled);
4351 else if (p != rq->idle)
4352 account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
4353 one_jiffy_scaled);
4354 else
4355 account_idle_time(one_jiffy);
4356 }
4357
4358 /*
4359 * Account multiple ticks of steal time.
4361 * @ticks: number of stolen ticks
4362 */
4363 void account_steal_ticks(unsigned long ticks)
4364 {
4365 account_steal_time(jiffies_to_cputime(ticks));
4366 }
4367
4368 /*
4369 * Account multiple ticks of idle time.
4370 * @ticks: number of idle ticks
4371 */
4372 void account_idle_ticks(unsigned long ticks)
4373 {
4374 account_idle_time(jiffies_to_cputime(ticks));
4375 }
4376
4377 #endif
4378
4379 /*
4380 * Use precise platform statistics if available:
4381 */
4382 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
4383 cputime_t task_utime(struct task_struct *p)
4384 {
4385 return p->utime;
4386 }
4387
4388 cputime_t task_stime(struct task_struct *p)
4389 {
4390 return p->stime;
4391 }
4392 #else
4393 cputime_t task_utime(struct task_struct *p)
4394 {
4395 clock_t utime = cputime_to_clock_t(p->utime),
4396 total = utime + cputime_to_clock_t(p->stime);
4397 u64 temp;
4398
4399 /*
4400 * Use CFS's precise accounting:
4401 */
4402 temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
4403
4404 if (total) {
4405 temp *= utime;
4406 do_div(temp, total);
4407 }
4408 utime = (clock_t)temp;
4409
4410 p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
4411 return p->prev_utime;
4412 }
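/*
 * Worked example (illustrative numbers): if the tick-based counters say
 * utime = 30 and stime = 10 clock ticks, but sum_exec_runtime corresponds
 * to 48 ticks, the reported utime becomes 48 * 30 / 40 = 36 ticks;
 * prev_utime then keeps the reported value monotonic across calls.
 */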
4413
4414 cputime_t task_stime(struct task_struct *p)
4415 {
4416 clock_t stime;
4417
4418 /*
4419 * Use CFS's precise accounting. (we subtract utime from
4420 * the total, to make sure the total observed by userspace
4421 * grows monotonically - apps rely on that):
4422 */
4423 stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
4424 cputime_to_clock_t(task_utime(p));
4425
4426 if (stime >= 0)
4427 p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
4428
4429 return p->prev_stime;
4430 }
4431 #endif
4432
4433 inline cputime_t task_gtime(struct task_struct *p)
4434 {
4435 return p->gtime;
4436 }
4437
4438 /*
4439 * This function gets called by the timer code, with HZ frequency.
4440 * We call it with interrupts disabled.
4441 *
4442 * It also gets called by the fork code, when changing the parent's
4443 * timeslices.
4444 */
4445 void scheduler_tick(void)
4446 {
4447 int cpu = smp_processor_id();
4448 struct rq *rq = cpu_rq(cpu);
4449 struct task_struct *curr = rq->curr;
4450
4451 sched_clock_tick();
4452
4453 spin_lock(&rq->lock);
4454 update_rq_clock(rq);
4455 update_cpu_load(rq);
4456 curr->sched_class->task_tick(rq, curr, 0);
4457 spin_unlock(&rq->lock);
4458
4459 #ifdef CONFIG_SMP
4460 rq->idle_at_tick = idle_cpu(cpu);
4461 trigger_load_balance(rq, cpu);
4462 #endif
4463 }
4464
4465 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
4466 defined(CONFIG_PREEMPT_TRACER))
4467
4468 static inline unsigned long get_parent_ip(unsigned long addr)
4469 {
4470 if (in_lock_functions(addr)) {
4471 addr = CALLER_ADDR2;
4472 if (in_lock_functions(addr))
4473 addr = CALLER_ADDR3;
4474 }
4475 return addr;
4476 }
4477
4478 void __kprobes add_preempt_count(int val)
4479 {
4480 #ifdef CONFIG_DEBUG_PREEMPT
4481 /*
4482 * Underflow?
4483 */
4484 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
4485 return;
4486 #endif
4487 preempt_count() += val;
4488 #ifdef CONFIG_DEBUG_PREEMPT
4489 /*
4490 * Spinlock count overflowing soon?
4491 */
4492 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
4493 PREEMPT_MASK - 10);
4494 #endif
4495 if (preempt_count() == val)
4496 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
4497 }
4498 EXPORT_SYMBOL(add_preempt_count);
4499
4500 void __kprobes sub_preempt_count(int val)
4501 {
4502 #ifdef CONFIG_DEBUG_PREEMPT
4503 /*
4504 * Underflow?
4505 */
4506 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
4507 return;
4508 /*
4509 * Is the spinlock portion underflowing?
4510 */
4511 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
4512 !(preempt_count() & PREEMPT_MASK)))
4513 return;
4514 #endif
4515
4516 if (preempt_count() == val)
4517 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
4518 preempt_count() -= val;
4519 }
4520 EXPORT_SYMBOL(sub_preempt_count);
4521
4522 #endif
4523
4524 /*
4525 * Print scheduling while atomic bug:
4526 */
4527 static noinline void __schedule_bug(struct task_struct *prev)
4528 {
4529 struct pt_regs *regs = get_irq_regs();
4530
4531 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
4532 prev->comm, prev->pid, preempt_count());
4533
4534 debug_show_held_locks(prev);
4535 print_modules();
4536 if (irqs_disabled())
4537 print_irqtrace_events(prev);
4538
4539 if (regs)
4540 show_regs(regs);
4541 else
4542 dump_stack();
4543 }
4544
4545 /*
4546 * Various schedule()-time debugging checks and statistics:
4547 */
4548 static inline void schedule_debug(struct task_struct *prev)
4549 {
4550 /*
4551 * Test if we are atomic. Since do_exit() needs to call into
4552 * schedule() atomically, we ignore that path for now.
4553 * Otherwise, whine if we are scheduling when we should not be.
4554 */
4555 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
4556 __schedule_bug(prev);
4557
4558 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
4559
4560 schedstat_inc(this_rq(), sched_count);
4561 #ifdef CONFIG_SCHEDSTATS
4562 if (unlikely(prev->lock_depth >= 0)) {
4563 schedstat_inc(this_rq(), bkl_count);
4564 schedstat_inc(prev, sched_info.bkl_count);
4565 }
4566 #endif
4567 }
4568
4569 /*
4570 * Pick up the highest-prio task:
4571 */
4572 static inline struct task_struct *
4573 pick_next_task(struct rq *rq, struct task_struct *prev)
4574 {
4575 const struct sched_class *class;
4576 struct task_struct *p;
4577
4578 /*
4579 * Optimization: we know that if all tasks are in
4580 * the fair class we can call that function directly:
4581 */
4582 if (likely(rq->nr_running == rq->cfs.nr_running)) {
4583 p = fair_sched_class.pick_next_task(rq);
4584 if (likely(p))
4585 return p;
4586 }
4587
4588 class = sched_class_highest;
4589 for ( ; ; ) {
4590 p = class->pick_next_task(rq);
4591 if (p)
4592 return p;
4593 /*
4594 * Will never be NULL as the idle class always
4595 * returns a non-NULL p:
4596 */
4597 class = class->next;
4598 }
4599 }
4600
4601 /*
4602 * schedule() is the main scheduler function.
4603 */
4604 asmlinkage void __sched schedule(void)
4605 {
4606 struct task_struct *prev, *next;
4607 unsigned long *switch_count;
4608 struct rq *rq;
4609 int cpu;
4610
4611 need_resched:
4612 preempt_disable();
4613 cpu = smp_processor_id();
4614 rq = cpu_rq(cpu);
4615 rcu_qsctr_inc(cpu);
4616 prev = rq->curr;
4617 switch_count = &prev->nivcsw;
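/*
 * switch_count starts out pointing at the involuntary context-switch
 * counter (nivcsw); it is redirected to nvcsw further down when the
 * task is voluntarily going to sleep.
 */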
4618
4619 release_kernel_lock(prev);
4620 need_resched_nonpreemptible:
4621
4622 schedule_debug(prev);
4623
4624 if (sched_feat(HRTICK))
4625 hrtick_clear(rq);
4626
4627 spin_lock_irq(&rq->lock);
4628 update_rq_clock(rq);
4629 clear_tsk_need_resched(prev);
4630
4631 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
4632 if (unlikely(signal_pending_state(prev->state, prev)))
4633 prev->state = TASK_RUNNING;
4634 else
4635 deactivate_task(rq, prev, 1);
4636 switch_count = &prev->nvcsw;
4637 }
4638
4639 #ifdef CONFIG_SMP
4640 if (prev->sched_class->pre_schedule)
4641 prev->sched_class->pre_schedule(rq, prev);
4642 #endif
4643
4644 if (unlikely(!rq->nr_running))
4645 idle_balance(cpu, rq);
4646
4647 prev->sched_class->put_prev_task(rq, prev);
4648 next = pick_next_task(rq, prev);
4649
4650 if (likely(prev != next)) {
4651 sched_info_switch(prev, next);
4652
4653 rq->nr_switches++;
4654 rq->curr = next;
4655 ++*switch_count;
4656
4657 context_switch(rq, prev, next); /* unlocks the rq */
4658 /*
4659 * the context switch might have flipped the stack from under
4660 * us, hence refresh the local variables.
4661 */
4662 cpu = smp_processor_id();
4663 rq = cpu_rq(cpu);
4664 } else
4665 spin_unlock_irq(&rq->lock);
4666
4667 if (unlikely(reacquire_kernel_lock(current) < 0))
4668 goto need_resched_nonpreemptible;
4669
4670 preempt_enable_no_resched();
4671 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
4672 goto need_resched;
4673 }
4674 EXPORT_SYMBOL(schedule);
4675
4676 #ifdef CONFIG_PREEMPT
4677 /*
4678 * this is the entry point to schedule() from in-kernel preemption
4679 * off of preempt_enable. Kernel preemptions off return from interrupt
4680 * occur there and call schedule directly.
4681 */
4682 asmlinkage void __sched preempt_schedule(void)
4683 {
4684 struct thread_info *ti = current_thread_info();
4685
4686 /*
4687 * If there is a non-zero preempt_count or interrupts are disabled,
4688 * we do not want to preempt the current task. Just return..
4689 */
4690 if (likely(ti->preempt_count || irqs_disabled()))
4691 return;
4692
4693 do {
4694 add_preempt_count(PREEMPT_ACTIVE);
4695 schedule();
4696 sub_preempt_count(PREEMPT_ACTIVE);
4697
4698 /*
4699 * Check again in case we missed a preemption opportunity
4700 * between schedule and now.
4701 */
4702 barrier();
4703 } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
4704 }
4705 EXPORT_SYMBOL(preempt_schedule);
4706
4707 /*
4708 * this is the entry point to schedule() from kernel preemption
4709 * off of irq context.
4710 * Note that this is called and returns with irqs disabled. This will
4711 * protect us against recursive calling from irq.
4712 */
4713 asmlinkage void __sched preempt_schedule_irq(void)
4714 {
4715 struct thread_info *ti = current_thread_info();
4716
4717 /* Catch callers which need to be fixed */
4718 BUG_ON(ti->preempt_count || !irqs_disabled());
4719
4720 do {
4721 add_preempt_count(PREEMPT_ACTIVE);
4722 local_irq_enable();
4723 schedule();
4724 local_irq_disable();
4725 sub_preempt_count(PREEMPT_ACTIVE);
4726
4727 /*
4728 * Check again in case we missed a preemption opportunity
4729 * between schedule and now.
4730 */
4731 barrier();
4732 } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
4733 }
4734
4735 #endif /* CONFIG_PREEMPT */
4736
4737 int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
4738 void *key)
4739 {
4740 return try_to_wake_up(curr->private, mode, sync);
4741 }
4742 EXPORT_SYMBOL(default_wake_function);
4743
4744 /*
4745 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
4746 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
4747 * number) then we wake all the non-exclusive tasks and one exclusive task.
4748 *
4749 * There are circumstances in which we can try to wake a task which has already
4750 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
4751 * zero in this (rare) case, and we handle it by continuing to scan the queue.
4752 */
4753 void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
4754 int nr_exclusive, int sync, void *key)
4755 {
4756 wait_queue_t *curr, *next;
4757
4758 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
4759 unsigned flags = curr->flags;
4760
4761 if (curr->func(curr, mode, sync, key) &&
4762 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
4763 break;
4764 }
4765 }
4766
4767 /**
4768 * __wake_up - wake up threads blocked on a waitqueue.
4769 * @q: the waitqueue
4770 * @mode: which threads
4771 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4772 * @key: is directly passed to the wakeup function
4773 */
4774 void __wake_up(wait_queue_head_t *q, unsigned int mode,
4775 int nr_exclusive, void *key)
4776 {
4777 unsigned long flags;
4778
4779 spin_lock_irqsave(&q->lock, flags);
4780 __wake_up_common(q, mode, nr_exclusive, 0, key);
4781 spin_unlock_irqrestore(&q->lock, flags);
4782 }
4783 EXPORT_SYMBOL(__wake_up);
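/*
 * Illustrative waiter/waker pairing for the wake_up() family (a sketch
 * using the usual <linux/wait.h> helpers, not code from this file):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(wq);
 *	static int condition;
 *
 *	waiter:	wait_event(wq, condition);
 *	waker:	condition = 1;
 *		wake_up(&wq);
 */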
4784
4785 /*
4786 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
4787 */
4788 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
4789 {
4790 __wake_up_common(q, mode, 1, 0, NULL);
4791 }
4792
4793 /**
4794 * __wake_up_sync - wake up threads blocked on a waitqueue.
4795 * @q: the waitqueue
4796 * @mode: which threads
4797 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4798 *
4799 * The sync wakeup differs in that the waker knows that it will schedule
4800 * away soon, so while the target thread will be woken up, it will not
4801 * be migrated to another CPU - ie. the two threads are 'synchronized'
4802 * with each other. This can prevent needless bouncing between CPUs.
4803 *
4804 * On UP it can prevent extra preemption.
4805 */
4806 void
4807 __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4808 {
4809 unsigned long flags;
4810 int sync = 1;
4811
4812 if (unlikely(!q))
4813 return;
4814
4815 if (unlikely(!nr_exclusive))
4816 sync = 0;
4817
4818 spin_lock_irqsave(&q->lock, flags);
4819 __wake_up_common(q, mode, nr_exclusive, sync, NULL);
4820 spin_unlock_irqrestore(&q->lock, flags);
4821 }
4822 EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
4823
4824 /**
4825 * complete: - signals a single thread waiting on this completion
4826 * @x: holds the state of this particular completion
4827 *
4828 * This will wake up a single thread waiting on this completion. Threads will be
4829 * awakened in the same order in which they were queued.
4830 *
4831 * See also complete_all(), wait_for_completion() and related routines.
4832 */
4833 void complete(struct completion *x)
4834 {
4835 unsigned long flags;
4836
4837 spin_lock_irqsave(&x->wait.lock, flags);
4838 x->done++;
4839 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
4840 spin_unlock_irqrestore(&x->wait.lock, flags);
4841 }
4842 EXPORT_SYMBOL(complete);
4843
4844 /**
4845 * complete_all: - signals all threads waiting on this completion
4846 * @x: holds the state of this particular completion
4847 *
4848 * This will wake up all threads waiting on this particular completion event.
4849 */
4850 void complete_all(struct completion *x)
4851 {
4852 unsigned long flags;
4853
4854 spin_lock_irqsave(&x->wait.lock, flags);
4855 x->done += UINT_MAX/2;
4856 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
4857 spin_unlock_irqrestore(&x->wait.lock, flags);
4858 }
4859 EXPORT_SYMBOL(complete_all);
4860
4861 static inline long __sched
4862 do_wait_for_common(struct completion *x, long timeout, int state)
4863 {
4864 if (!x->done) {
4865 DECLARE_WAITQUEUE(wait, current);
4866
4867 wait.flags |= WQ_FLAG_EXCLUSIVE;
4868 __add_wait_queue_tail(&x->wait, &wait);
4869 do {
4870 if (signal_pending_state(state, current)) {
4871 timeout = -ERESTARTSYS;
4872 break;
4873 }
4874 __set_current_state(state);
4875 spin_unlock_irq(&x->wait.lock);
4876 timeout = schedule_timeout(timeout);
4877 spin_lock_irq(&x->wait.lock);
4878 } while (!x->done && timeout);
4879 __remove_wait_queue(&x->wait, &wait);
4880 if (!x->done)
4881 return timeout;
4882 }
4883 x->done--;
4884 return timeout ?: 1;
4885 }
4886
4887 static long __sched
4888 wait_for_common(struct completion *x, long timeout, int state)
4889 {
4890 might_sleep();
4891
4892 spin_lock_irq(&x->wait.lock);
4893 timeout = do_wait_for_common(x, timeout, state);
4894 spin_unlock_irq(&x->wait.lock);
4895 return timeout;
4896 }
4897
4898 /**
4899 * wait_for_completion: - waits for completion of a task
4900 * @x: holds the state of this particular completion
4901 *
4902 * This waits to be signaled for completion of a specific task. It is NOT
4903 * interruptible and there is no timeout.
4904 *
4905 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
4906 * and interrupt capability. Also see complete().
4907 */
4908 void __sched wait_for_completion(struct completion *x)
4909 {
4910 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
4911 }
4912 EXPORT_SYMBOL(wait_for_completion);
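/*
 * Minimal usage sketch (assumes the helpers from <linux/completion.h>,
 * not code from this file):
 *
 *	struct completion done;
 *
 *	init_completion(&done);
 *	(hand &done to another context, which eventually calls complete(&done))
 *	wait_for_completion(&done);
 */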
4913
4914 /**
4915 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
4916 * @x: holds the state of this particular completion
4917 * @timeout: timeout value in jiffies
4918 *
4919 * This waits for either a completion of a specific task to be signaled or for a
4920 * specified timeout to expire. The timeout is in jiffies. It is not
4921 * interruptible.
4922 */
4923 unsigned long __sched
4924 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
4925 {
4926 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
4927 }
4928 EXPORT_SYMBOL(wait_for_completion_timeout);
4929
4930 /**
4931 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
4932 * @x: holds the state of this particular completion
4933 *
4934 * This waits for completion of a specific task to be signaled. It is
4935 * interruptible.
4936 */
4937 int __sched wait_for_completion_interruptible(struct completion *x)
4938 {
4939 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
4940 if (t == -ERESTARTSYS)
4941 return t;
4942 return 0;
4943 }
4944 EXPORT_SYMBOL(wait_for_completion_interruptible);
4945
4946 /**
4947 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
4948 * @x: holds the state of this particular completion
4949 * @timeout: timeout value in jiffies
4950 *
4951 * This waits for either a completion of a specific task to be signaled or for a
4952 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
4953 */
4954 unsigned long __sched
4955 wait_for_completion_interruptible_timeout(struct completion *x,
4956 unsigned long timeout)
4957 {
4958 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
4959 }
4960 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
4961
4962 /**
4963 * wait_for_completion_killable: - waits for completion of a task (killable)
4964 * @x: holds the state of this particular completion
4965 *
4966 * This waits to be signaled for completion of a specific task. It can be
4967 * interrupted by a kill signal.
4968 */
4969 int __sched wait_for_completion_killable(struct completion *x)
4970 {
4971 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
4972 if (t == -ERESTARTSYS)
4973 return t;
4974 return 0;
4975 }
4976 EXPORT_SYMBOL(wait_for_completion_killable);
4977
4978 /**
4979 * try_wait_for_completion - try to decrement a completion without blocking
4980 * @x: completion structure
4981 *
4982 * Returns: 0 if a decrement cannot be done without blocking
4983 * 1 if a decrement succeeded.
4984 *
4985 * If a completion is being used as a counting completion,
4986 * attempt to decrement the counter without blocking. This
4987 * enables us to avoid waiting if the resource the completion
4988 * is protecting is not available.
4989 */
4990 bool try_wait_for_completion(struct completion *x)
4991 {
4992 int ret = 1;
4993
4994 spin_lock_irq(&x->wait.lock);
4995 if (!x->done)
4996 ret = 0;
4997 else
4998 x->done--;
4999 spin_unlock_irq(&x->wait.lock);
5000 return ret;
5001 }
5002 EXPORT_SYMBOL(try_wait_for_completion);
5003
5004 /**
5005 * completion_done - Test to see if a completion has any waiters
5006 * @x: completion structure
5007 *
5008 * Returns: 0 if there are waiters (wait_for_completion() in progress)
5009 * 1 if there are no waiters.
5010 *
5011 */
5012 bool completion_done(struct completion *x)
5013 {
5014 int ret = 1;
5015
5016 spin_lock_irq(&x->wait.lock);
5017 if (!x->done)
5018 ret = 0;
5019 spin_unlock_irq(&x->wait.lock);
5020 return ret;
5021 }
5022 EXPORT_SYMBOL(completion_done);
5023
5024 static long __sched
5025 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
5026 {
5027 unsigned long flags;
5028 wait_queue_t wait;
5029
5030 init_waitqueue_entry(&wait, current);
5031
5032 __set_current_state(state);
5033
5034 spin_lock_irqsave(&q->lock, flags);
5035 __add_wait_queue(q, &wait);
5036 spin_unlock(&q->lock);
5037 timeout = schedule_timeout(timeout);
5038 spin_lock_irq(&q->lock);
5039 __remove_wait_queue(q, &wait);
5040 spin_unlock_irqrestore(&q->lock, flags);
5041
5042 return timeout;
5043 }
5044
5045 void __sched interruptible_sleep_on(wait_queue_head_t *q)
5046 {
5047 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
5048 }
5049 EXPORT_SYMBOL(interruptible_sleep_on);
5050
5051 long __sched
5052 interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
5053 {
5054 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
5055 }
5056 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
5057
5058 void __sched sleep_on(wait_queue_head_t *q)
5059 {
5060 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
5061 }
5062 EXPORT_SYMBOL(sleep_on);
5063
5064 long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
5065 {
5066 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
5067 }
5068 EXPORT_SYMBOL(sleep_on_timeout);
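/*
 * Note on the sleep_on() family above: unlike wait_event(), these helpers
 * take no condition to re-check, so a wakeup that arrives before the caller
 * has queued itself can be missed; callers need their own serialization.
 */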
5069
5070 #ifdef CONFIG_RT_MUTEXES
5071
5072 /*
5073 * rt_mutex_setprio - set the current priority of a task
5074 * @p: task
5075 * @prio: prio value (kernel-internal form)
5076 *
5077 * This function changes the 'effective' priority of a task. It does
5078 * not touch ->normal_prio like __setscheduler().
5079 *
5080 * Used by the rt_mutex code to implement priority inheritance logic.
5081 */
5082 void rt_mutex_setprio(struct task_struct *p, int prio)
5083 {
5084 unsigned long flags;
5085 int oldprio, on_rq, running;
5086 struct rq *rq;
5087 const struct sched_class *prev_class = p->sched_class;
5088
5089 BUG_ON(prio < 0 || prio > MAX_PRIO);
5090
5091 rq = task_rq_lock(p, &flags);
5092 update_rq_clock(rq);
5093
5094 oldprio = p->prio;
5095 on_rq = p->se.on_rq;
5096 running = task_current(rq, p);
5097 if (on_rq)
5098 dequeue_task(rq, p, 0);
5099 if (running)
5100 p->sched_class->put_prev_task(rq, p);
5101
5102 if (rt_prio(prio))
5103 p->sched_class = &rt_sched_class;
5104 else
5105 p->sched_class = &fair_sched_class;
5106
5107 p->prio = prio;
5108
5109 if (running)
5110 p->sched_class->set_curr_task(rq);
5111 if (on_rq) {
5112 enqueue_task(rq, p, 0);
5113
5114 check_class_changed(rq, p, prev_class, oldprio, running);
5115 }
5116 task_rq_unlock(rq, &flags);
5117 }
5118
5119 #endif
5120
5121 void set_user_nice(struct task_struct *p, long nice)
5122 {
5123 int old_prio, delta, on_rq;
5124 unsigned long flags;
5125 struct rq *rq;
5126
5127 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
5128 return;
5129 /*
5130 * We have to be careful, if called from sys_setpriority(),
5131 * the task might be in the middle of scheduling on another CPU.
5132 */
5133 rq = task_rq_lock(p, &flags);
5134 update_rq_clock(rq);
5135 /*
5136 * The RT priorities are set via sched_setscheduler(), but we still
5137 * allow the 'normal' nice value to be set - but as expected
5138 * it won't have any effect on scheduling while the task remains
5139 * SCHED_FIFO/SCHED_RR:
5140 */
5141 if (task_has_rt_policy(p)) {
5142 p->static_prio = NICE_TO_PRIO(nice);
5143 goto out_unlock;
5144 }
5145 on_rq = p->se.on_rq;
5146 if (on_rq)
5147 dequeue_task(rq, p, 0);
5148
5149 p->static_prio = NICE_TO_PRIO(nice);
5150 set_load_weight(p);
5151 old_prio = p->prio;
5152 p->prio = effective_prio(p);
5153 delta = p->prio - old_prio;
5154
5155 if (on_rq) {
5156 enqueue_task(rq, p, 0);
5157 /*
5158 * If the task increased its priority or is running and
5159 * lowered its priority, then reschedule its CPU:
5160 */
5161 if (delta < 0 || (delta > 0 && task_running(rq, p)))
5162 resched_task(rq->curr);
5163 }
5164 out_unlock:
5165 task_rq_unlock(rq, &flags);
5166 }
5167 EXPORT_SYMBOL(set_user_nice);
5168
5169 /*
5170 * can_nice - check if a task can reduce its nice value
5171 * @p: task
5172 * @nice: nice value
5173 */
5174 int can_nice(const struct task_struct *p, const int nice)
5175 {
5176 /* convert nice value [19,-20] to rlimit style value [1,40] */
5177 int nice_rlim = 20 - nice;
5178
5179 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
5180 capable(CAP_SYS_NICE));
5181 }
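/*
 * Worked example: lowering nice to -10 maps to a rlimit-style value of
 * 20 - (-10) = 30, so an unprivileged task needs RLIMIT_NICE >= 30
 * (or CAP_SYS_NICE) for can_nice() to allow it.
 */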
5182
5183 #ifdef __ARCH_WANT_SYS_NICE
5184
5185 /*
5186 * sys_nice - change the priority of the current process.
5187 * @increment: priority increment
5188 *
5189 * sys_setpriority is a more generic, but much slower function that
5190 * does similar things.
5191 */
5192 SYSCALL_DEFINE1(nice, int, increment)
5193 {
5194 long nice, retval;
5195
5196 /*
5197 * Setpriority might change our priority at the same moment.
5198 * We don't have to worry. Conceptually one call occurs first
5199 * and we have a single winner.
5200 */
5201 if (increment < -40)
5202 increment = -40;
5203 if (increment > 40)
5204 increment = 40;
5205
5206 nice = PRIO_TO_NICE(current->static_prio) + increment;
5207 if (nice < -20)
5208 nice = -20;
5209 if (nice > 19)
5210 nice = 19;
5211
5212 if (increment < 0 && !can_nice(current, nice))
5213 return -EPERM;
5214
5215 retval = security_task_setnice(current, nice);
5216 if (retval)
5217 return retval;
5218
5219 set_user_nice(current, nice);
5220 return 0;
5221 }
5222
5223 #endif
5224
5225 /**
5226 * task_prio - return the priority value of a given task.
5227 * @p: the task in question.
5228 *
5229 * This is the priority value as seen by users in /proc.
5230 * RT tasks are offset by -200. Normal tasks are centered
5231 * around 0, value goes from -16 to +15.
5232 */
5233 int task_prio(const struct task_struct *p)
5234 {
5235 return p->prio - MAX_RT_PRIO;
5236 }
5237
5238 /**
5239 * task_nice - return the nice value of a given task.
5240 * @p: the task in question.
5241 */
5242 int task_nice(const struct task_struct *p)
5243 {
5244 return TASK_NICE(p);
5245 }
5246 EXPORT_SYMBOL(task_nice);
5247
5248 /**
5249 * idle_cpu - is a given cpu idle currently?
5250 * @cpu: the processor in question.
5251 */
5252 int idle_cpu(int cpu)
5253 {
5254 return cpu_curr(cpu) == cpu_rq(cpu)->idle;
5255 }
5256
5257 /**
5258 * idle_task - return the idle task for a given cpu.
5259 * @cpu: the processor in question.
5260 */
5261 struct task_struct *idle_task(int cpu)
5262 {
5263 return cpu_rq(cpu)->idle;
5264 }
5265
5266 /**
5267 * find_process_by_pid - find a process with a matching PID value.
5268 * @pid: the pid in question.
5269 */
5270 static struct task_struct *find_process_by_pid(pid_t pid)
5271 {
5272 return pid ? find_task_by_vpid(pid) : current;
5273 }
5274
5275 /* Actually do priority change: must hold rq lock. */
5276 static void
5277 __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
5278 {
5279 BUG_ON(p->se.on_rq);
5280
5281 p->policy = policy;
5282 switch (p->policy) {
5283 case SCHED_NORMAL:
5284 case SCHED_BATCH:
5285 case SCHED_IDLE:
5286 p->sched_class = &fair_sched_class;
5287 break;
5288 case SCHED_FIFO:
5289 case SCHED_RR:
5290 p->sched_class = &rt_sched_class;
5291 break;
5292 }
5293
5294 p->rt_priority = prio;
5295 p->normal_prio = normal_prio(p);
5296 /* we are holding p->pi_lock already */
5297 p->prio = rt_mutex_getprio(p);
5298 set_load_weight(p);
5299 }
5300
5301 /*
5302 * check whether the target process has a UID that matches the current process's
5303 */
5304 static bool check_same_owner(struct task_struct *p)
5305 {
5306 const struct cred *cred = current_cred(), *pcred;
5307 bool match;
5308
5309 rcu_read_lock();
5310 pcred = __task_cred(p);
5311 match = (cred->euid == pcred->euid ||
5312 cred->euid == pcred->uid);
5313 rcu_read_unlock();
5314 return match;
5315 }
5316
5317 static int __sched_setscheduler(struct task_struct *p, int policy,
5318 struct sched_param *param, bool user)
5319 {
5320 int retval, oldprio, oldpolicy = -1, on_rq, running;
5321 unsigned long flags;
5322 const struct sched_class *prev_class = p->sched_class;
5323 struct rq *rq;
5324
5325 /* may grab non-irq protected spin_locks */
5326 BUG_ON(in_interrupt());
5327 recheck:
5328 /* double check policy once rq lock held */
5329 if (policy < 0)
5330 policy = oldpolicy = p->policy;
5331 else if (policy != SCHED_FIFO && policy != SCHED_RR &&
5332 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
5333 policy != SCHED_IDLE)
5334 return -EINVAL;
5335 /*
5336 * Valid priorities for SCHED_FIFO and SCHED_RR are
5337 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
5338 * SCHED_BATCH and SCHED_IDLE is 0.
5339 */
5340 if (param->sched_priority < 0 ||
5341 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
5342 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
5343 return -EINVAL;
5344 if (rt_policy(policy) != (param->sched_priority != 0))
5345 return -EINVAL;
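/*
 * Examples rejected by the check above: SCHED_FIFO with sched_priority == 0,
 * or SCHED_NORMAL with a non-zero sched_priority.
 */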
5346
5347 /*
5348 * Allow unprivileged RT tasks to decrease priority:
5349 */
5350 if (user && !capable(CAP_SYS_NICE)) {
5351 if (rt_policy(policy)) {
5352 unsigned long rlim_rtprio;
5353
5354 if (!lock_task_sighand(p, &flags))
5355 return -ESRCH;
5356 rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
5357 unlock_task_sighand(p, &flags);
5358
5359 /* can't set/change the rt policy */
5360 if (policy != p->policy && !rlim_rtprio)
5361 return -EPERM;
5362
5363 /* can't increase priority */
5364 if (param->sched_priority > p->rt_priority &&
5365 param->sched_priority > rlim_rtprio)
5366 return -EPERM;
5367 }
5368 /*
5369 * Like positive nice levels, don't allow tasks to
5370 * move out of SCHED_IDLE either:
5371 */
5372 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
5373 return -EPERM;
5374
5375 /* can't change other user's priorities */
5376 if (!check_same_owner(p))
5377 return -EPERM;
5378 }
5379
5380 if (user) {
5381 #ifdef CONFIG_RT_GROUP_SCHED
5382 /*
5383 * Do not allow realtime tasks into groups that have no runtime
5384 * assigned.
5385 */
5386 if (rt_bandwidth_enabled() && rt_policy(policy) &&
5387 task_group(p)->rt_bandwidth.rt_runtime == 0)
5388 return -EPERM;
5389 #endif
5390
5391 retval = security_task_setscheduler(p, policy, param);
5392 if (retval)
5393 return retval;
5394 }
5395
5396 /*
5397 * make sure no PI-waiters arrive (or leave) while we are
5398 * changing the priority of the task:
5399 */
5400 spin_lock_irqsave(&p->pi_lock, flags);
5401 /*
5402 * To be able to change p->policy safely, the appropriate
5403 * runqueue lock must be held.
5404 */
5405 rq = __task_rq_lock(p);
5406 /* recheck policy now with rq lock held */
5407 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
5408 policy = oldpolicy = -1;
5409 __task_rq_unlock(rq);
5410 spin_unlock_irqrestore(&p->pi_lock, flags);
5411 goto recheck;
5412 }
5413 update_rq_clock(rq);
5414 on_rq = p->se.on_rq;
5415 running = task_current(rq, p);
5416 if (on_rq)
5417 deactivate_task(rq, p, 0);
5418 if (running)
5419 p->sched_class->put_prev_task(rq, p);
5420
5421 oldprio = p->prio;
5422 __setscheduler(rq, p, policy, param->sched_priority);
5423
5424 if (running)
5425 p->sched_class->set_curr_task(rq);
5426 if (on_rq) {
5427 activate_task(rq, p, 0);
5428
5429 check_class_changed(rq, p, prev_class, oldprio, running);
5430 }
5431 __task_rq_unlock(rq);
5432 spin_unlock_irqrestore(&p->pi_lock, flags);
5433
5434 rt_mutex_adjust_pi(p);
5435
5436 return 0;
5437 }
5438
5439 /**
5440 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
5441 * @p: the task in question.
5442 * @policy: new policy.
5443 * @param: structure containing the new RT priority.
5444 *
5445 * NOTE that the task may be already dead.
5446 */
5447 int sched_setscheduler(struct task_struct *p, int policy,
5448 struct sched_param *param)
5449 {
5450 return __sched_setscheduler(p, policy, param, true);
5451 }
5452 EXPORT_SYMBOL_GPL(sched_setscheduler);
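/*
 * In-kernel usage sketch (illustrative values, not code from this file):
 *
 *	struct sched_param param = { .sched_priority = 1 };
 *
 *	if (sched_setscheduler(task, SCHED_FIFO, &param))
 *		printk(KERN_WARNING "could not switch task to SCHED_FIFO\n");
 */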
5453
5454 /**
5455 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
5456 * @p: the task in question.
5457 * @policy: new policy.
5458 * @param: structure containing the new RT priority.
5459 *
5460 * Just like sched_setscheduler, only don't bother checking if the
5461 * current context has permission. For example, this is needed in
5462 * stop_machine(): we create temporary high priority worker threads,
5463 * but our caller might not have that capability.
5464 */
5465 int sched_setscheduler_nocheck(struct task_struct *p, int policy,
5466 struct sched_param *param)
5467 {
5468 return __sched_setscheduler(p, policy, param, false);
5469 }
5470
5471 static int
5472 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
5473 {
5474 struct sched_param lparam;
5475 struct task_struct *p;
5476 int retval;
5477
5478 if (!param || pid < 0)
5479 return -EINVAL;
5480 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
5481 return -EFAULT;
5482
5483 rcu_read_lock();
5484 retval = -ESRCH;
5485 p = find_process_by_pid(pid);
5486 if (p != NULL)
5487 retval = sched_setscheduler(p, policy, &lparam);
5488 rcu_read_unlock();
5489
5490 return retval;
5491 }
5492
5493 /**
5494 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
5495 * @pid: the pid in question.
5496 * @policy: new policy.
5497 * @param: structure containing the new RT priority.
5498 */
5499 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
5500 struct sched_param __user *, param)
5501 {
5502 /* negative values for policy are not valid */
5503 if (policy < 0)
5504 return -EINVAL;
5505
5506 return do_sched_setscheduler(pid, policy, param);
5507 }
5508
5509 /**
5510 * sys_sched_setparam - set/change the RT priority of a thread
5511 * @pid: the pid in question.
5512 * @param: structure containing the new RT priority.
5513 */
5514 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
5515 {
5516 return do_sched_setscheduler(pid, -1, param);
5517 }
5518
5519 /**
5520 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
5521 * @pid: the pid in question.
5522 */
5523 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
5524 {
5525 struct task_struct *p;
5526 int retval;
5527
5528 if (pid < 0)
5529 return -EINVAL;
5530
5531 retval = -ESRCH;
5532 read_lock(&tasklist_lock);
5533 p = find_process_by_pid(pid);
5534 if (p) {
5535 retval = security_task_getscheduler(p);
5536 if (!retval)
5537 retval = p->policy;
5538 }
5539 read_unlock(&tasklist_lock);
5540 return retval;
5541 }
5542
5543 /**
5544 * sys_sched_getparam - get the RT priority of a thread
5545 * @pid: the pid in question.
5546 * @param: structure containing the RT priority.
5547 */
5548 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
5549 {
5550 struct sched_param lp;
5551 struct task_struct *p;
5552 int retval;
5553
5554 if (!param || pid < 0)
5555 return -EINVAL;
5556
5557 read_lock(&tasklist_lock);
5558 p = find_process_by_pid(pid);
5559 retval = -ESRCH;
5560 if (!p)
5561 goto out_unlock;
5562
5563 retval = security_task_getscheduler(p);
5564 if (retval)
5565 goto out_unlock;
5566
5567 lp.sched_priority = p->rt_priority;
5568 read_unlock(&tasklist_lock);
5569
5570 /*
5571 * This one might sleep, we cannot do it with a spinlock held ...
5572 */
5573 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
5574
5575 return retval;
5576
5577 out_unlock:
5578 read_unlock(&tasklist_lock);
5579 return retval;
5580 }
5581
5582 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
5583 {
5584 cpumask_var_t cpus_allowed, new_mask;
5585 struct task_struct *p;
5586 int retval;
5587
5588 get_online_cpus();
5589 read_lock(&tasklist_lock);
5590
5591 p = find_process_by_pid(pid);
5592 if (!p) {
5593 read_unlock(&tasklist_lock);
5594 put_online_cpus();
5595 return -ESRCH;
5596 }
5597
5598 /*
5599 * It is not safe to call set_cpus_allowed with the
5600 * tasklist_lock held. We will bump the task_struct's
5601 * usage count and then drop tasklist_lock.
5602 */
5603 get_task_struct(p);
5604 read_unlock(&tasklist_lock);
5605
5606 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
5607 retval = -ENOMEM;
5608 goto out_put_task;
5609 }
5610 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
5611 retval = -ENOMEM;
5612 goto out_free_cpus_allowed;
5613 }
5614 retval = -EPERM;
5615 if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
5616 goto out_unlock;
5617
5618 retval = security_task_setscheduler(p, 0, NULL);
5619 if (retval)
5620 goto out_unlock;
5621
5622 cpuset_cpus_allowed(p, cpus_allowed);
5623 cpumask_and(new_mask, in_mask, cpus_allowed);
5624 again:
5625 retval = set_cpus_allowed_ptr(p, new_mask);
5626
5627 if (!retval) {
5628 cpuset_cpus_allowed(p, cpus_allowed);
5629 if (!cpumask_subset(new_mask, cpus_allowed)) {
5630 /*
5631 * We must have raced with a concurrent cpuset
5632 * update. Just reset the cpus_allowed to the
5633 * cpuset's cpus_allowed
5634 */
5635 cpumask_copy(new_mask, cpus_allowed);
5636 goto again;
5637 }
5638 }
5639 out_unlock:
5640 free_cpumask_var(new_mask);
5641 out_free_cpus_allowed:
5642 free_cpumask_var(cpus_allowed);
5643 out_put_task:
5644 put_task_struct(p);
5645 put_online_cpus();
5646 return retval;
5647 }
5648
5649 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
5650 struct cpumask *new_mask)
5651 {
5652 if (len < cpumask_size())
5653 cpumask_clear(new_mask);
5654 else if (len > cpumask_size())
5655 len = cpumask_size();
5656
5657 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
5658 }
5659
5660 /**
5661 * sys_sched_setaffinity - set the cpu affinity of a process
5662 * @pid: pid of the process
5663 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5664 * @user_mask_ptr: user-space pointer to the new cpu mask
5665 */
5666 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
5667 unsigned long __user *, user_mask_ptr)
5668 {
5669 cpumask_var_t new_mask;
5670 int retval;
5671
5672 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
5673 return -ENOMEM;
5674
5675 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
5676 if (retval == 0)
5677 retval = sched_setaffinity(pid, new_mask);
5678 free_cpumask_var(new_mask);
5679 return retval;
5680 }
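
/*
 * Illustrative example (not part of this file): pinning the calling process
 * to CPU 2 from userspace with the glibc cpu_set_t helpers.  The kernel side
 * only ever sees the raw bitmask copied in by get_user_cpu_mask() above.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);
 *	if (sched_setaffinity(0, sizeof(set), &set) == -1)
 *		perror("sched_setaffinity");
 */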
5681
5682 long sched_getaffinity(pid_t pid, struct cpumask *mask)
5683 {
5684 struct task_struct *p;
5685 int retval;
5686
5687 get_online_cpus();
5688 read_lock(&tasklist_lock);
5689
5690 retval = -ESRCH;
5691 p = find_process_by_pid(pid);
5692 if (!p)
5693 goto out_unlock;
5694
5695 retval = security_task_getscheduler(p);
5696 if (retval)
5697 goto out_unlock;
5698
5699 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
5700
5701 out_unlock:
5702 read_unlock(&tasklist_lock);
5703 put_online_cpus();
5704
5705 return retval;
5706 }
5707
5708 /**
5709 * sys_sched_getaffinity - get the cpu affinity of a process
5710 * @pid: pid of the process
5711 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5712 * @user_mask_ptr: user-space pointer to hold the current cpu mask
5713 */
5714 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
5715 unsigned long __user *, user_mask_ptr)
5716 {
5717 int ret;
5718 cpumask_var_t mask;
5719
5720 if (len < cpumask_size())
5721 return -EINVAL;
5722
5723 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
5724 return -ENOMEM;
5725
5726 ret = sched_getaffinity(pid, mask);
5727 if (ret == 0) {
5728 if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
5729 ret = -EFAULT;
5730 else
5731 ret = cpumask_size();
5732 }
5733 free_cpumask_var(mask);
5734
5735 return ret;
5736 }
5737
5738 /**
5739 * sys_sched_yield - yield the current processor to other threads.
5740 *
5741 * This function yields the current CPU to other tasks. If there are no
5742 * other threads running on this CPU then this function will return.
5743 */
5744 SYSCALL_DEFINE0(sched_yield)
5745 {
5746 struct rq *rq = this_rq_lock();
5747
5748 schedstat_inc(rq, yld_count);
5749 current->sched_class->yield_task(rq);
5750
5751 /*
5752 * Since we are going to call schedule() anyway, there's
5753 * no need to preempt or enable interrupts:
5754 */
5755 __release(rq->lock);
5756 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
5757 _raw_spin_unlock(&rq->lock);
5758 preempt_enable_no_resched();
5759
5760 schedule();
5761
5762 return 0;
5763 }
5764
5765 static void __cond_resched(void)
5766 {
5767 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
5768 __might_sleep(__FILE__, __LINE__);
5769 #endif
5770 /*
5771 * The BKS might be reacquired before we have dropped
5772 * PREEMPT_ACTIVE, which could trigger a second
5773 * cond_resched() call.
5774 */
5775 do {
5776 add_preempt_count(PREEMPT_ACTIVE);
5777 schedule();
5778 sub_preempt_count(PREEMPT_ACTIVE);
5779 } while (need_resched());
5780 }
5781
5782 int __sched _cond_resched(void)
5783 {
5784 if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
5785 system_state == SYSTEM_RUNNING) {
5786 __cond_resched();
5787 return 1;
5788 }
5789 return 0;
5790 }
5791 EXPORT_SYMBOL(_cond_resched);
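
/*
 * Typical usage sketch (illustrative, process_item() is a made-up helper):
 * long-running kernel loops call cond_resched(), the wrapper around
 * _cond_resched() above, so that on non-preemptible kernels other tasks
 * still get a chance to run.
 *
 *	int i;
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();		// reschedules only if need_resched()
 *	}
 */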
5792
5793 /*
5794 * cond_resched_lock() - if a reschedule is pending, drop the given lock,
5795 * call schedule, and on return reacquire the lock.
5796 *
5797 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
5798 * operations here to prevent schedule() from being called twice (once via
5799 * spin_unlock(), once by hand).
5800 */
5801 int cond_resched_lock(spinlock_t *lock)
5802 {
5803 int resched = need_resched() && system_state == SYSTEM_RUNNING;
5804 int ret = 0;
5805
5806 if (spin_needbreak(lock) || resched) {
5807 spin_unlock(lock);
5808 if (resched && need_resched())
5809 __cond_resched();
5810 else
5811 cpu_relax();
5812 ret = 1;
5813 spin_lock(lock);
5814 }
5815 return ret;
5816 }
5817 EXPORT_SYMBOL(cond_resched_lock);
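
/*
 * Usage sketch (illustrative only; more_work() and do_one_step() are
 * placeholders): while doing a long walk under a spinlock, offer to drop
 * the lock when a reschedule or lock break is due.  A non-zero return means
 * the lock was released and re-taken, so any state derived from the
 * protected data must be revalidated.
 *
 *	spin_lock(&mylock);
 *	while (more_work()) {
 *		do_one_step();
 *		if (cond_resched_lock(&mylock)) {
 *			// lock was dropped and re-acquired; recheck state
 *		}
 *	}
 *	spin_unlock(&mylock);
 */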
5818
5819 int __sched cond_resched_softirq(void)
5820 {
5821 BUG_ON(!in_softirq());
5822
5823 if (need_resched() && system_state == SYSTEM_RUNNING) {
5824 local_bh_enable();
5825 __cond_resched();
5826 local_bh_disable();
5827 return 1;
5828 }
5829 return 0;
5830 }
5831 EXPORT_SYMBOL(cond_resched_softirq);
5832
5833 /**
5834 * yield - yield the current processor to other threads.
5835 *
5836 * This is a shortcut for kernel-space yielding - it marks the
5837 * thread runnable and calls sys_sched_yield().
5838 */
5839 void __sched yield(void)
5840 {
5841 set_current_state(TASK_RUNNING);
5842 sys_sched_yield();
5843 }
5844 EXPORT_SYMBOL(yield);
5845
5846 /*
5847 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
5848 * that process accounting knows that this is a task in IO wait state.
5849 *
5850 * But don't do that if it is a deliberate, throttling IO wait (this task
5851 * has set its backing_dev_info: the queue against which it should throttle)
5852 */
5853 void __sched io_schedule(void)
5854 {
5855 struct rq *rq = &__raw_get_cpu_var(runqueues);
5856
5857 delayacct_blkio_start();
5858 atomic_inc(&rq->nr_iowait);
5859 schedule();
5860 atomic_dec(&rq->nr_iowait);
5861 delayacct_blkio_end();
5862 }
5863 EXPORT_SYMBOL(io_schedule);
5864
5865 long __sched io_schedule_timeout(long timeout)
5866 {
5867 struct rq *rq = &__raw_get_cpu_var(runqueues);
5868 long ret;
5869
5870 delayacct_blkio_start();
5871 atomic_inc(&rq->nr_iowait);
5872 ret = schedule_timeout(timeout);
5873 atomic_dec(&rq->nr_iowait);
5874 delayacct_blkio_end();
5875 return ret;
5876 }
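
/*
 * Usage sketch (illustrative, with a hypothetical wait queue 'wq' and
 * completion flag 'done'): callers blocking on block-layer I/O use
 * io_schedule()/io_schedule_timeout() instead of plain schedule() so the
 * sleep is accounted as iowait and delayacct records it as block I/O delay.
 *
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *	if (!done)
 *		io_schedule();
 *	finish_wait(&wq, &wait);
 */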
5877
5878 /**
5879 * sys_sched_get_priority_max - return maximum RT priority.
5880 * @policy: scheduling class.
5881 *
5882 * this syscall returns the maximum rt_priority that can be used
5883 * by a given scheduling class.
5884 */
5885 SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
5886 {
5887 int ret = -EINVAL;
5888
5889 switch (policy) {
5890 case SCHED_FIFO:
5891 case SCHED_RR:
5892 ret = MAX_USER_RT_PRIO-1;
5893 break;
5894 case SCHED_NORMAL:
5895 case SCHED_BATCH:
5896 case SCHED_IDLE:
5897 ret = 0;
5898 break;
5899 }
5900 return ret;
5901 }
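
/*
 * Illustrative userspace sketch: querying the valid priority range for a
 * policy before picking an RT priority, instead of hard-coding the limits.
 *
 *	#include <sched.h>
 *
 *	int min = sched_get_priority_min(SCHED_FIFO);
 *	int max = sched_get_priority_max(SCHED_FIFO);
 *	// any sched_priority in [min, max] is acceptable for SCHED_FIFO
 */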
5902
5903 /**
5904 * sys_sched_get_priority_min - return minimum RT priority.
5905 * @policy: scheduling class.
5906 *
5907 * this syscall returns the minimum rt_priority that can be used
5908 * by a given scheduling class.
5909 */
5910 SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
5911 {
5912 int ret = -EINVAL;
5913
5914 switch (policy) {
5915 case SCHED_FIFO:
5916 case SCHED_RR:
5917 ret = 1;
5918 break;
5919 case SCHED_NORMAL:
5920 case SCHED_BATCH:
5921 case SCHED_IDLE:
5922 ret = 0;
5923 }
5924 return ret;
5925 }
5926
5927 /**
5928 * sys_sched_rr_get_interval - return the default timeslice of a process.
5929 * @pid: pid of the process.
5930 * @interval: userspace pointer to the timeslice value.
5931 *
5932 * this syscall writes the default timeslice value of a given process
5933 * into the user-space timespec buffer. A value of '0' means infinity.
5934 */
5935 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
5936 struct timespec __user *, interval)
5937 {
5938 struct task_struct *p;
5939 unsigned int time_slice;
5940 int retval;
5941 struct timespec t;
5942
5943 if (pid < 0)
5944 return -EINVAL;
5945
5946 retval = -ESRCH;
5947 read_lock(&tasklist_lock);
5948 p = find_process_by_pid(pid);
5949 if (!p)
5950 goto out_unlock;
5951
5952 retval = security_task_getscheduler(p);
5953 if (retval)
5954 goto out_unlock;
5955
5956 /*
5957 * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
5958 * tasks that are on an otherwise idle runqueue:
5959 */
5960 time_slice = 0;
5961 if (p->policy == SCHED_RR) {
5962 time_slice = DEF_TIMESLICE;
5963 } else if (p->policy != SCHED_FIFO) {
5964 struct sched_entity *se = &p->se;
5965 unsigned long flags;
5966 struct rq *rq;
5967
5968 rq = task_rq_lock(p, &flags);
5969 if (rq->cfs.load.weight)
5970 time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
5971 task_rq_unlock(rq, &flags);
5972 }
5973 read_unlock(&tasklist_lock);
5974 jiffies_to_timespec(time_slice, &t);
5975 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
5976 return retval;
5977
5978 out_unlock:
5979 read_unlock(&tasklist_lock);
5980 return retval;
5981 }
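
/*
 * Illustrative userspace sketch: reading back the timeslice computed above
 * for the calling thread (pid 0 means "the caller").
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("timeslice: %ld.%09ld s\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 */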
5982
5983 static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
5984
5985 void sched_show_task(struct task_struct *p)
5986 {
5987 unsigned long free = 0;
5988 unsigned state;
5989
5990 state = p->state ? __ffs(p->state) + 1 : 0;
5991 printk(KERN_INFO "%-13.13s %c", p->comm,
5992 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
5993 #if BITS_PER_LONG == 32
5994 if (state == TASK_RUNNING)
5995 printk(KERN_CONT " running ");
5996 else
5997 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
5998 #else
5999 if (state == TASK_RUNNING)
6000 printk(KERN_CONT " running task ");
6001 else
6002 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
6003 #endif
6004 #ifdef CONFIG_DEBUG_STACK_USAGE
6005 {
6006 unsigned long *n = end_of_stack(p);
6007 while (!*n)
6008 n++;
6009 free = (unsigned long)n - (unsigned long)end_of_stack(p);
6010 }
6011 #endif
6012 printk(KERN_CONT "%5lu %5d %6d\n", free,
6013 task_pid_nr(p), task_pid_nr(p->real_parent));
6014
6015 show_stack(p, NULL);
6016 }
6017
6018 void show_state_filter(unsigned long state_filter)
6019 {
6020 struct task_struct *g, *p;
6021
6022 #if BITS_PER_LONG == 32
6023 printk(KERN_INFO
6024 " task PC stack pid father\n");
6025 #else
6026 printk(KERN_INFO
6027 " task PC stack pid father\n");
6028 #endif
6029 read_lock(&tasklist_lock);
6030 do_each_thread(g, p) {
6031 /*
6032 * reset the NMI-timeout, listing all tasks on a slow
6033 * console might take a lot of time:
6034 */
6035 touch_nmi_watchdog();
6036 if (!state_filter || (p->state & state_filter))
6037 sched_show_task(p);
6038 } while_each_thread(g, p);
6039
6040 touch_all_softlockup_watchdogs();
6041
6042 #ifdef CONFIG_SCHED_DEBUG
6043 sysrq_sched_debug_show();
6044 #endif
6045 read_unlock(&tasklist_lock);
6046 /*
6047 * Only show locks if all tasks are dumped:
6048 */
6049 if (state_filter == -1)
6050 debug_show_all_locks();
6051 }
6052
6053 void __cpuinit init_idle_bootup_task(struct task_struct *idle)
6054 {
6055 idle->sched_class = &idle_sched_class;
6056 }
6057
6058 /**
6059 * init_idle - set up an idle thread for a given CPU
6060 * @idle: task in question
6061 * @cpu: cpu the idle task belongs to
6062 *
6063 * NOTE: this function does not set the idle thread's NEED_RESCHED
6064 * flag, to make booting more robust.
6065 */
6066 void __cpuinit init_idle(struct task_struct *idle, int cpu)
6067 {
6068 struct rq *rq = cpu_rq(cpu);
6069 unsigned long flags;
6070
6071 spin_lock_irqsave(&rq->lock, flags);
6072
6073 __sched_fork(idle);
6074 idle->se.exec_start = sched_clock();
6075
6076 idle->prio = idle->normal_prio = MAX_PRIO;
6077 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
6078 __set_task_cpu(idle, cpu);
6079
6080 rq->curr = rq->idle = idle;
6081 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
6082 idle->oncpu = 1;
6083 #endif
6084 spin_unlock_irqrestore(&rq->lock, flags);
6085
6086 /* Set the preempt count _outside_ the spinlocks! */
6087 #if defined(CONFIG_PREEMPT)
6088 task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
6089 #else
6090 task_thread_info(idle)->preempt_count = 0;
6091 #endif
6092 /*
6093 * The idle tasks have their own, simple scheduling class:
6094 */
6095 idle->sched_class = &idle_sched_class;
6096 ftrace_graph_init_task(idle);
6097 }
6098
6099 /*
6100 * In a system that switches off the HZ timer nohz_cpu_mask
6101 * indicates which cpus entered this state. This is used
6102 * in the rcu update to wait only for active cpus. For systems
6103 * that do not switch off the HZ timer, nohz_cpu_mask should
6104 * always be CPU_BITS_NONE.
6105 */
6106 cpumask_var_t nohz_cpu_mask;
6107
6108 /*
6109 * Increase the granularity value when there are more CPUs,
6110 * because with more CPUs the 'effective latency' as visible
6111 * to users decreases. But the relationship is not linear,
6112 * so pick a second-best guess by going with the log2 of the
6113 * number of CPUs.
6114 *
6115 * This idea comes from the SD scheduler of Con Kolivas:
6116 */
6117 static inline void sched_init_granularity(void)
6118 {
6119 unsigned int factor = 1 + ilog2(num_online_cpus());
6120 const unsigned long limit = 200000000;
6121
6122 sysctl_sched_min_granularity *= factor;
6123 if (sysctl_sched_min_granularity > limit)
6124 sysctl_sched_min_granularity = limit;
6125
6126 sysctl_sched_latency *= factor;
6127 if (sysctl_sched_latency > limit)
6128 sysctl_sched_latency = limit;
6129
6130 sysctl_sched_wakeup_granularity *= factor;
6131
6132 sysctl_sched_shares_ratelimit *= factor;
6133 }
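
/*
 * Worked example (derived from the code above, not a measured value): on an
 * 8-CPU box, factor = 1 + ilog2(8) = 4, so each of the sysctls above is
 * scaled to 4x its single-CPU default, and sysctl_sched_min_granularity and
 * sysctl_sched_latency are then clamped to the 200 ms limit if the
 * multiplication pushed them past it.
 */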
6134
6135 #ifdef CONFIG_SMP
6136 /*
6137 * This is how migration works:
6138 *
6139 * 1) we queue a struct migration_req structure in the source CPU's
6140 * runqueue and wake up that CPU's migration thread.
6141 * 2) we down() the locked semaphore => thread blocks.
6142 * 3) migration thread wakes up (implicitly it forces the migrated
6143 * thread off the CPU)
6144 * 4) it gets the migration request and checks whether the migrated
6145 * task is still in the wrong runqueue.
6146 * 5) if it's in the wrong runqueue then the migration thread removes
6147 * it and puts it into the right queue.
6148 * 6) migration thread up()s the semaphore.
6149 * 7) we wake up and the migration is done.
6150 */
6151
6152 /*
6153 * Change a given task's CPU affinity. Migrate the thread to a
6154 * proper CPU and schedule it away if the CPU it's executing on
6155 * is removed from the allowed bitmask.
6156 *
6157 * NOTE: the caller must have a valid reference to the task, the
6158 * task must not exit() & deallocate itself prematurely. The
6159 * call is not atomic; no spinlocks may be held.
6160 */
6161 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
6162 {
6163 struct migration_req req;
6164 unsigned long flags;
6165 struct rq *rq;
6166 int ret = 0;
6167
6168 rq = task_rq_lock(p, &flags);
6169 if (!cpumask_intersects(new_mask, cpu_online_mask)) {
6170 ret = -EINVAL;
6171 goto out;
6172 }
6173
6174 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
6175 !cpumask_equal(&p->cpus_allowed, new_mask))) {
6176 ret = -EINVAL;
6177 goto out;
6178 }
6179
6180 if (p->sched_class->set_cpus_allowed)
6181 p->sched_class->set_cpus_allowed(p, new_mask);
6182 else {
6183 cpumask_copy(&p->cpus_allowed, new_mask);
6184 p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
6185 }
6186
6187 /* Can the task run on the task's current CPU? If so, we're done */
6188 if (cpumask_test_cpu(task_cpu(p), new_mask))
6189 goto out;
6190
6191 if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
6192 /* Need help from migration thread: drop lock and wait. */
6193 task_rq_unlock(rq, &flags);
6194 wake_up_process(rq->migration_thread);
6195 wait_for_completion(&req.done);
6196 tlb_migrate_finish(p->mm);
6197 return 0;
6198 }
6199 out:
6200 task_rq_unlock(rq, &flags);
6201
6202 return ret;
6203 }
6204 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
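
/*
 * Usage sketch (illustrative): a kernel thread binding itself to one CPU,
 * e.g. during per-CPU setup.  'target_cpu' is a placeholder for the sketch.
 *
 *	if (set_cpus_allowed_ptr(current, cpumask_of(target_cpu)))
 *		printk(KERN_WARNING "could not bind to CPU %d\n", target_cpu);
 */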
6205
6206 /*
6207 * Move (not current) task off this cpu, onto dest cpu. We're doing
6208 * this because either it can't run here any more (set_cpus_allowed()
6209 * away from this CPU, or CPU going down), or because we're
6210 * attempting to rebalance this task on exec (sched_exec).
6211 *
6212 * So we race with normal scheduler movements, but that's OK, as long
6213 * as the task is no longer on this CPU.
6214 *
6215 * Returns non-zero if task was successfully migrated.
6216 */
6217 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
6218 {
6219 struct rq *rq_dest, *rq_src;
6220 int ret = 0, on_rq;
6221
6222 if (unlikely(!cpu_active(dest_cpu)))
6223 return ret;
6224
6225 rq_src = cpu_rq(src_cpu);
6226 rq_dest = cpu_rq(dest_cpu);
6227
6228 double_rq_lock(rq_src, rq_dest);
6229 /* Already moved. */
6230 if (task_cpu(p) != src_cpu)
6231 goto done;
6232 /* Affinity changed (again). */
6233 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
6234 goto fail;
6235
6236 on_rq = p->se.on_rq;
6237 if (on_rq)
6238 deactivate_task(rq_src, p, 0);
6239
6240 set_task_cpu(p, dest_cpu);
6241 if (on_rq) {
6242 activate_task(rq_dest, p, 0);
6243 check_preempt_curr(rq_dest, p, 0);
6244 }
6245 done:
6246 ret = 1;
6247 fail:
6248 double_rq_unlock(rq_src, rq_dest);
6249 return ret;
6250 }
6251
6252 /*
6253 * migration_thread - this is a highprio system thread that performs
6254 * thread migration by bumping thread off CPU then 'pushing' onto
6255 * another runqueue.
6256 */
6257 static int migration_thread(void *data)
6258 {
6259 int cpu = (long)data;
6260 struct rq *rq;
6261
6262 rq = cpu_rq(cpu);
6263 BUG_ON(rq->migration_thread != current);
6264
6265 set_current_state(TASK_INTERRUPTIBLE);
6266 while (!kthread_should_stop()) {
6267 struct migration_req *req;
6268 struct list_head *head;
6269
6270 spin_lock_irq(&rq->lock);
6271
6272 if (cpu_is_offline(cpu)) {
6273 spin_unlock_irq(&rq->lock);
6274 goto wait_to_die;
6275 }
6276
6277 if (rq->active_balance) {
6278 active_load_balance(rq, cpu);
6279 rq->active_balance = 0;
6280 }
6281
6282 head = &rq->migration_queue;
6283
6284 if (list_empty(head)) {
6285 spin_unlock_irq(&rq->lock);
6286 schedule();
6287 set_current_state(TASK_INTERRUPTIBLE);
6288 continue;
6289 }
6290 req = list_entry(head->next, struct migration_req, list);
6291 list_del_init(head->next);
6292
6293 spin_unlock(&rq->lock);
6294 __migrate_task(req->task, cpu, req->dest_cpu);
6295 local_irq_enable();
6296
6297 complete(&req->done);
6298 }
6299 __set_current_state(TASK_RUNNING);
6300 return 0;
6301
6302 wait_to_die:
6303 /* Wait for kthread_stop */
6304 set_current_state(TASK_INTERRUPTIBLE);
6305 while (!kthread_should_stop()) {
6306 schedule();
6307 set_current_state(TASK_INTERRUPTIBLE);
6308 }
6309 __set_current_state(TASK_RUNNING);
6310 return 0;
6311 }
6312
6313 #ifdef CONFIG_HOTPLUG_CPU
6314
6315 static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
6316 {
6317 int ret;
6318
6319 local_irq_disable();
6320 ret = __migrate_task(p, src_cpu, dest_cpu);
6321 local_irq_enable();
6322 return ret;
6323 }
6324
6325 /*
6326 * Figure out where task on dead CPU should go, use force if necessary.
6327 */
6328 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
6329 {
6330 int dest_cpu;
6331 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
6332
6333 again:
6334 /* Look for allowed, online CPU in same node. */
6335 for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
6336 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
6337 goto move;
6338
6339 /* Any allowed, online CPU? */
6340 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
6341 if (dest_cpu < nr_cpu_ids)
6342 goto move;
6343
6344 /* No more Mr. Nice Guy. */
6345 if (dest_cpu >= nr_cpu_ids) {
6346 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
6347 dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
6348
6349 /*
6350 * Don't tell them about moving exiting tasks or
6351 * kernel threads (both mm NULL), since they never
6352 * leave kernel.
6353 */
6354 if (p->mm && printk_ratelimit()) {
6355 printk(KERN_INFO "process %d (%s) no "
6356 "longer affine to cpu%d\n",
6357 task_pid_nr(p), p->comm, dead_cpu);
6358 }
6359 }
6360
6361 move:
6362 /* It can have affinity changed while we were choosing. */
6363 if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
6364 goto again;
6365 }
6366
6367 /*
6368 * While a dead CPU has no uninterruptible tasks queued at this point,
6369 * it might still have a nonzero ->nr_uninterruptible counter, because
6370 * for performance reasons the counter is not strictly tracking tasks to
6371 * their home CPUs. So we just add the counter to another CPU's counter,
6372 * to keep the global sum constant after CPU-down:
6373 */
6374 static void migrate_nr_uninterruptible(struct rq *rq_src)
6375 {
6376 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
6377 unsigned long flags;
6378
6379 local_irq_save(flags);
6380 double_rq_lock(rq_src, rq_dest);
6381 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
6382 rq_src->nr_uninterruptible = 0;
6383 double_rq_unlock(rq_src, rq_dest);
6384 local_irq_restore(flags);
6385 }
6386
6387 /* Run through task list and migrate tasks from the dead cpu. */
6388 static void migrate_live_tasks(int src_cpu)
6389 {
6390 struct task_struct *p, *t;
6391
6392 read_lock(&tasklist_lock);
6393
6394 do_each_thread(t, p) {
6395 if (p == current)
6396 continue;
6397
6398 if (task_cpu(p) == src_cpu)
6399 move_task_off_dead_cpu(src_cpu, p);
6400 } while_each_thread(t, p);
6401
6402 read_unlock(&tasklist_lock);
6403 }
6404
6405 /*
6406 * Schedules idle task to be the next runnable task on current CPU.
6407 * It does so by boosting its priority to highest possible.
6408 * Used by CPU offline code.
6409 */
6410 void sched_idle_next(void)
6411 {
6412 int this_cpu = smp_processor_id();
6413 struct rq *rq = cpu_rq(this_cpu);
6414 struct task_struct *p = rq->idle;
6415 unsigned long flags;
6416
6417 /* cpu has to be offline */
6418 BUG_ON(cpu_online(this_cpu));
6419
6420 /*
6421 * Strictly not necessary since rest of the CPUs are stopped by now
6422 * and interrupts disabled on the current cpu.
6423 */
6424 spin_lock_irqsave(&rq->lock, flags);
6425
6426 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
6427
6428 update_rq_clock(rq);
6429 activate_task(rq, p, 0);
6430
6431 spin_unlock_irqrestore(&rq->lock, flags);
6432 }
6433
6434 /*
6435 * Ensures that the idle task is using init_mm right before its cpu goes
6436 * offline.
6437 */
6438 void idle_task_exit(void)
6439 {
6440 struct mm_struct *mm = current->active_mm;
6441
6442 BUG_ON(cpu_online(smp_processor_id()));
6443
6444 if (mm != &init_mm)
6445 switch_mm(mm, &init_mm, current);
6446 mmdrop(mm);
6447 }
6448
6449 /* called under rq->lock with disabled interrupts */
6450 static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
6451 {
6452 struct rq *rq = cpu_rq(dead_cpu);
6453
6454 /* Must be exiting, otherwise would be on tasklist. */
6455 BUG_ON(!p->exit_state);
6456
6457 /* Cannot have done final schedule yet: would have vanished. */
6458 BUG_ON(p->state == TASK_DEAD);
6459
6460 get_task_struct(p);
6461
6462 /*
6463 * Drop lock around migration; if someone else moves it,
6464 * that's OK. No task can be added to this CPU, so iteration is
6465 * fine.
6466 */
6467 spin_unlock_irq(&rq->lock);
6468 move_task_off_dead_cpu(dead_cpu, p);
6469 spin_lock_irq(&rq->lock);
6470
6471 put_task_struct(p);
6472 }
6473
6474 /* release_task() removes task from tasklist, so we won't find dead tasks. */
6475 static void migrate_dead_tasks(unsigned int dead_cpu)
6476 {
6477 struct rq *rq = cpu_rq(dead_cpu);
6478 struct task_struct *next;
6479
6480 for ( ; ; ) {
6481 if (!rq->nr_running)
6482 break;
6483 update_rq_clock(rq);
6484 next = pick_next_task(rq, rq->curr);
6485 if (!next)
6486 break;
6487 next->sched_class->put_prev_task(rq, next);
6488 migrate_dead(dead_cpu, next);
6489
6490 }
6491 }
6492 #endif /* CONFIG_HOTPLUG_CPU */
6493
6494 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
6495
6496 static struct ctl_table sd_ctl_dir[] = {
6497 {
6498 .procname = "sched_domain",
6499 .mode = 0555,
6500 },
6501 {0, },
6502 };
6503
6504 static struct ctl_table sd_ctl_root[] = {
6505 {
6506 .ctl_name = CTL_KERN,
6507 .procname = "kernel",
6508 .mode = 0555,
6509 .child = sd_ctl_dir,
6510 },
6511 {0, },
6512 };
6513
6514 static struct ctl_table *sd_alloc_ctl_entry(int n)
6515 {
6516 struct ctl_table *entry =
6517 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
6518
6519 return entry;
6520 }
6521
6522 static void sd_free_ctl_entry(struct ctl_table **tablep)
6523 {
6524 struct ctl_table *entry;
6525
6526 /*
6527 * In the intermediate directories, both the child directory and
6528 * procname are dynamically allocated and could fail but the mode
6529 * will always be set. In the lowest directory the names are
6530 * static strings and all have proc handlers.
6531 */
6532 for (entry = *tablep; entry->mode; entry++) {
6533 if (entry->child)
6534 sd_free_ctl_entry(&entry->child);
6535 if (entry->proc_handler == NULL)
6536 kfree(entry->procname);
6537 }
6538
6539 kfree(*tablep);
6540 *tablep = NULL;
6541 }
6542
6543 static void
6544 set_table_entry(struct ctl_table *entry,
6545 const char *procname, void *data, int maxlen,
6546 mode_t mode, proc_handler *proc_handler)
6547 {
6548 entry->procname = procname;
6549 entry->data = data;
6550 entry->maxlen = maxlen;
6551 entry->mode = mode;
6552 entry->proc_handler = proc_handler;
6553 }
6554
6555 static struct ctl_table *
6556 sd_alloc_ctl_domain_table(struct sched_domain *sd)
6557 {
6558 struct ctl_table *table = sd_alloc_ctl_entry(13);
6559
6560 if (table == NULL)
6561 return NULL;
6562
6563 set_table_entry(&table[0], "min_interval", &sd->min_interval,
6564 sizeof(long), 0644, proc_doulongvec_minmax);
6565 set_table_entry(&table[1], "max_interval", &sd->max_interval,
6566 sizeof(long), 0644, proc_doulongvec_minmax);
6567 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
6568 sizeof(int), 0644, proc_dointvec_minmax);
6569 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
6570 sizeof(int), 0644, proc_dointvec_minmax);
6571 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
6572 sizeof(int), 0644, proc_dointvec_minmax);
6573 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
6574 sizeof(int), 0644, proc_dointvec_minmax);
6575 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
6576 sizeof(int), 0644, proc_dointvec_minmax);
6577 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
6578 sizeof(int), 0644, proc_dointvec_minmax);
6579 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
6580 sizeof(int), 0644, proc_dointvec_minmax);
6581 set_table_entry(&table[9], "cache_nice_tries",
6582 &sd->cache_nice_tries,
6583 sizeof(int), 0644, proc_dointvec_minmax);
6584 set_table_entry(&table[10], "flags", &sd->flags,
6585 sizeof(int), 0644, proc_dointvec_minmax);
6586 set_table_entry(&table[11], "name", sd->name,
6587 CORENAME_MAX_SIZE, 0444, proc_dostring);
6588 /* &table[12] is terminator */
6589
6590 return table;
6591 }
6592
6593 static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
6594 {
6595 struct ctl_table *entry, *table;
6596 struct sched_domain *sd;
6597 int domain_num = 0, i;
6598 char buf[32];
6599
6600 for_each_domain(cpu, sd)
6601 domain_num++;
6602 entry = table = sd_alloc_ctl_entry(domain_num + 1);
6603 if (table == NULL)
6604 return NULL;
6605
6606 i = 0;
6607 for_each_domain(cpu, sd) {
6608 snprintf(buf, 32, "domain%d", i);
6609 entry->procname = kstrdup(buf, GFP_KERNEL);
6610 entry->mode = 0555;
6611 entry->child = sd_alloc_ctl_domain_table(sd);
6612 entry++;
6613 i++;
6614 }
6615 return table;
6616 }
6617
6618 static struct ctl_table_header *sd_sysctl_header;
6619 static void register_sched_domain_sysctl(void)
6620 {
6621 int i, cpu_num = num_online_cpus();
6622 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
6623 char buf[32];
6624
6625 WARN_ON(sd_ctl_dir[0].child);
6626 sd_ctl_dir[0].child = entry;
6627
6628 if (entry == NULL)
6629 return;
6630
6631 for_each_online_cpu(i) {
6632 snprintf(buf, 32, "cpu%d", i);
6633 entry->procname = kstrdup(buf, GFP_KERNEL);
6634 entry->mode = 0555;
6635 entry->child = sd_alloc_ctl_cpu_table(i);
6636 entry++;
6637 }
6638
6639 WARN_ON(sd_sysctl_header);
6640 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
6641 }
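
/*
 * The result, as seen from userspace, is a per-CPU, per-domain tree under
 * /proc/sys/kernel/sched_domain (sketch for one CPU with two domain levels;
 * the actual depth depends on the topology):
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/max_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/...
 *	/proc/sys/kernel/sched_domain/cpu0/domain1/...
 */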
6642
6643 /* may be called multiple times per register */
6644 static void unregister_sched_domain_sysctl(void)
6645 {
6646 if (sd_sysctl_header)
6647 unregister_sysctl_table(sd_sysctl_header);
6648 sd_sysctl_header = NULL;
6649 if (sd_ctl_dir[0].child)
6650 sd_free_ctl_entry(&sd_ctl_dir[0].child);
6651 }
6652 #else
6653 static void register_sched_domain_sysctl(void)
6654 {
6655 }
6656 static void unregister_sched_domain_sysctl(void)
6657 {
6658 }
6659 #endif
6660
6661 static void set_rq_online(struct rq *rq)
6662 {
6663 if (!rq->online) {
6664 const struct sched_class *class;
6665
6666 cpumask_set_cpu(rq->cpu, rq->rd->online);
6667 rq->online = 1;
6668
6669 for_each_class(class) {
6670 if (class->rq_online)
6671 class->rq_online(rq);
6672 }
6673 }
6674 }
6675
6676 static void set_rq_offline(struct rq *rq)
6677 {
6678 if (rq->online) {
6679 const struct sched_class *class;
6680
6681 for_each_class(class) {
6682 if (class->rq_offline)
6683 class->rq_offline(rq);
6684 }
6685
6686 cpumask_clear_cpu(rq->cpu, rq->rd->online);
6687 rq->online = 0;
6688 }
6689 }
6690
6691 /*
6692 * migration_call - callback that gets triggered when a CPU is added.
6693 * Here we can start up the necessary migration thread for the new CPU.
6694 */
6695 static int __cpuinit
6696 migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
6697 {
6698 struct task_struct *p;
6699 int cpu = (long)hcpu;
6700 unsigned long flags;
6701 struct rq *rq;
6702
6703 switch (action) {
6704
6705 case CPU_UP_PREPARE:
6706 case CPU_UP_PREPARE_FROZEN:
6707 p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
6708 if (IS_ERR(p))
6709 return NOTIFY_BAD;
6710 kthread_bind(p, cpu);
6711 /* Must be high prio: stop_machine expects to yield to it. */
6712 rq = task_rq_lock(p, &flags);
6713 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
6714 task_rq_unlock(rq, &flags);
6715 cpu_rq(cpu)->migration_thread = p;
6716 break;
6717
6718 case CPU_ONLINE:
6719 case CPU_ONLINE_FROZEN:
6720 /* Strictly unnecessary, as first user will wake it. */
6721 wake_up_process(cpu_rq(cpu)->migration_thread);
6722
6723 /* Update our root-domain */
6724 rq = cpu_rq(cpu);
6725 spin_lock_irqsave(&rq->lock, flags);
6726 if (rq->rd) {
6727 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
6728
6729 set_rq_online(rq);
6730 }
6731 spin_unlock_irqrestore(&rq->lock, flags);
6732 break;
6733
6734 #ifdef CONFIG_HOTPLUG_CPU
6735 case CPU_UP_CANCELED:
6736 case CPU_UP_CANCELED_FROZEN:
6737 if (!cpu_rq(cpu)->migration_thread)
6738 break;
6739 /* Unbind it from offline cpu so it can run. Fall thru. */
6740 kthread_bind(cpu_rq(cpu)->migration_thread,
6741 cpumask_any(cpu_online_mask));
6742 kthread_stop(cpu_rq(cpu)->migration_thread);
6743 cpu_rq(cpu)->migration_thread = NULL;
6744 break;
6745
6746 case CPU_DEAD:
6747 case CPU_DEAD_FROZEN:
6748 cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
6749 migrate_live_tasks(cpu);
6750 rq = cpu_rq(cpu);
6751 kthread_stop(rq->migration_thread);
6752 rq->migration_thread = NULL;
6753 /* Idle task back to normal (off runqueue, low prio) */
6754 spin_lock_irq(&rq->lock);
6755 update_rq_clock(rq);
6756 deactivate_task(rq, rq->idle, 0);
6757 rq->idle->static_prio = MAX_PRIO;
6758 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
6759 rq->idle->sched_class = &idle_sched_class;
6760 migrate_dead_tasks(cpu);
6761 spin_unlock_irq(&rq->lock);
6762 cpuset_unlock();
6763 migrate_nr_uninterruptible(rq);
6764 BUG_ON(rq->nr_running != 0);
6765
6766 /*
6767 * No need to migrate the tasks: it was best-effort if
6768 * they didn't take sched_hotcpu_mutex. Just wake up
6769 * the requestors.
6770 */
6771 spin_lock_irq(&rq->lock);
6772 while (!list_empty(&rq->migration_queue)) {
6773 struct migration_req *req;
6774
6775 req = list_entry(rq->migration_queue.next,
6776 struct migration_req, list);
6777 list_del_init(&req->list);
6778 spin_unlock_irq(&rq->lock);
6779 complete(&req->done);
6780 spin_lock_irq(&rq->lock);
6781 }
6782 spin_unlock_irq(&rq->lock);
6783 break;
6784
6785 case CPU_DYING:
6786 case CPU_DYING_FROZEN:
6787 /* Update our root-domain */
6788 rq = cpu_rq(cpu);
6789 spin_lock_irqsave(&rq->lock, flags);
6790 if (rq->rd) {
6791 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
6792 set_rq_offline(rq);
6793 }
6794 spin_unlock_irqrestore(&rq->lock, flags);
6795 break;
6796 #endif
6797 }
6798 return NOTIFY_OK;
6799 }
6800
6801 /* Register at highest priority so that task migration (migrate_all_tasks)
6802 * happens before everything else.
6803 */
6804 static struct notifier_block __cpuinitdata migration_notifier = {
6805 .notifier_call = migration_call,
6806 .priority = 10
6807 };
6808
6809 static int __init migration_init(void)
6810 {
6811 void *cpu = (void *)(long)smp_processor_id();
6812 int err;
6813
6814 /* Start one for the boot CPU: */
6815 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
6816 BUG_ON(err == NOTIFY_BAD);
6817 migration_call(&migration_notifier, CPU_ONLINE, cpu);
6818 register_cpu_notifier(&migration_notifier);
6819
6820 return err;
6821 }
6822 early_initcall(migration_init);
6823 #endif
6824
6825 #ifdef CONFIG_SMP
6826
6827 #ifdef CONFIG_SCHED_DEBUG
6828
6829 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
6830 struct cpumask *groupmask)
6831 {
6832 struct sched_group *group = sd->groups;
6833 char str[256];
6834
6835 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
6836 cpumask_clear(groupmask);
6837
6838 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
6839
6840 if (!(sd->flags & SD_LOAD_BALANCE)) {
6841 printk("does not load-balance\n");
6842 if (sd->parent)
6843 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
6844 " has parent");
6845 return -1;
6846 }
6847
6848 printk(KERN_CONT "span %s level %s\n", str, sd->name);
6849
6850 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
6851 printk(KERN_ERR "ERROR: domain->span does not contain "
6852 "CPU%d\n", cpu);
6853 }
6854 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
6855 printk(KERN_ERR "ERROR: domain->groups does not contain"
6856 " CPU%d\n", cpu);
6857 }
6858
6859 printk(KERN_DEBUG "%*s groups:", level + 1, "");
6860 do {
6861 if (!group) {
6862 printk("\n");
6863 printk(KERN_ERR "ERROR: group is NULL\n");
6864 break;
6865 }
6866
6867 if (!group->__cpu_power) {
6868 printk(KERN_CONT "\n");
6869 printk(KERN_ERR "ERROR: domain->cpu_power not "
6870 "set\n");
6871 break;
6872 }
6873
6874 if (!cpumask_weight(sched_group_cpus(group))) {
6875 printk(KERN_CONT "\n");
6876 printk(KERN_ERR "ERROR: empty group\n");
6877 break;
6878 }
6879
6880 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
6881 printk(KERN_CONT "\n");
6882 printk(KERN_ERR "ERROR: repeated CPUs\n");
6883 break;
6884 }
6885
6886 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
6887
6888 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
6889 printk(KERN_CONT " %s", str);
6890
6891 group = group->next;
6892 } while (group != sd->groups);
6893 printk(KERN_CONT "\n");
6894
6895 if (!cpumask_equal(sched_domain_span(sd), groupmask))
6896 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
6897
6898 if (sd->parent &&
6899 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
6900 printk(KERN_ERR "ERROR: parent span is not a superset "
6901 "of domain->span\n");
6902 return 0;
6903 }
6904
6905 static void sched_domain_debug(struct sched_domain *sd, int cpu)
6906 {
6907 cpumask_var_t groupmask;
6908 int level = 0;
6909
6910 if (!sd) {
6911 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
6912 return;
6913 }
6914
6915 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
6916
6917 if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) {
6918 printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
6919 return;
6920 }
6921
6922 for (;;) {
6923 if (sched_domain_debug_one(sd, cpu, level, groupmask))
6924 break;
6925 level++;
6926 sd = sd->parent;
6927 if (!sd)
6928 break;
6929 }
6930 free_cpumask_var(groupmask);
6931 }
6932 #else /* !CONFIG_SCHED_DEBUG */
6933 # define sched_domain_debug(sd, cpu) do { } while (0)
6934 #endif /* CONFIG_SCHED_DEBUG */
6935
6936 static int sd_degenerate(struct sched_domain *sd)
6937 {
6938 if (cpumask_weight(sched_domain_span(sd)) == 1)
6939 return 1;
6940
6941 /* Following flags need at least 2 groups */
6942 if (sd->flags & (SD_LOAD_BALANCE |
6943 SD_BALANCE_NEWIDLE |
6944 SD_BALANCE_FORK |
6945 SD_BALANCE_EXEC |
6946 SD_SHARE_CPUPOWER |
6947 SD_SHARE_PKG_RESOURCES)) {
6948 if (sd->groups != sd->groups->next)
6949 return 0;
6950 }
6951
6952 /* Following flags don't use groups */
6953 if (sd->flags & (SD_WAKE_IDLE |
6954 SD_WAKE_AFFINE |
6955 SD_WAKE_BALANCE))
6956 return 0;
6957
6958 return 1;
6959 }
6960
6961 static int
6962 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
6963 {
6964 unsigned long cflags = sd->flags, pflags = parent->flags;
6965
6966 if (sd_degenerate(parent))
6967 return 1;
6968
6969 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
6970 return 0;
6971
6972 /* Does parent contain flags not in child? */
6973 /* WAKE_BALANCE is a subset of WAKE_AFFINE */
6974 if (cflags & SD_WAKE_AFFINE)
6975 pflags &= ~SD_WAKE_BALANCE;
6976 /* Flags needing groups don't count if only 1 group in parent */
6977 if (parent->groups == parent->groups->next) {
6978 pflags &= ~(SD_LOAD_BALANCE |
6979 SD_BALANCE_NEWIDLE |
6980 SD_BALANCE_FORK |
6981 SD_BALANCE_EXEC |
6982 SD_SHARE_CPUPOWER |
6983 SD_SHARE_PKG_RESOURCES);
6984 if (nr_node_ids == 1)
6985 pflags &= ~SD_SERIALIZE;
6986 }
6987 if (~cflags & pflags)
6988 return 0;
6989
6990 return 1;
6991 }
6992
6993 static void free_rootdomain(struct root_domain *rd)
6994 {
6995 cpupri_cleanup(&rd->cpupri);
6996
6997 free_cpumask_var(rd->rto_mask);
6998 free_cpumask_var(rd->online);
6999 free_cpumask_var(rd->span);
7000 kfree(rd);
7001 }
7002
7003 static void rq_attach_root(struct rq *rq, struct root_domain *rd)
7004 {
7005 struct root_domain *old_rd = NULL;
7006 unsigned long flags;
7007
7008 spin_lock_irqsave(&rq->lock, flags);
7009
7010 if (rq->rd) {
7011 old_rd = rq->rd;
7012
7013 if (cpumask_test_cpu(rq->cpu, old_rd->online))
7014 set_rq_offline(rq);
7015
7016 cpumask_clear_cpu(rq->cpu, old_rd->span);
7017
7018 /*
7019 * If we don't want to free the old root domain (old_rd) yet then
7020 * set old_rd to NULL to skip the freeing later
7021 * in this function:
7022 */
7023 if (!atomic_dec_and_test(&old_rd->refcount))
7024 old_rd = NULL;
7025 }
7026
7027 atomic_inc(&rd->refcount);
7028 rq->rd = rd;
7029
7030 cpumask_set_cpu(rq->cpu, rd->span);
7031 if (cpumask_test_cpu(rq->cpu, cpu_online_mask))
7032 set_rq_online(rq);
7033
7034 spin_unlock_irqrestore(&rq->lock, flags);
7035
7036 if (old_rd)
7037 free_rootdomain(old_rd);
7038 }
7039
7040 static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
7041 {
7042 memset(rd, 0, sizeof(*rd));
7043
7044 if (bootmem) {
7045 alloc_bootmem_cpumask_var(&def_root_domain.span);
7046 alloc_bootmem_cpumask_var(&def_root_domain.online);
7047 alloc_bootmem_cpumask_var(&def_root_domain.rto_mask);
7048 cpupri_init(&rd->cpupri, true);
7049 return 0;
7050 }
7051
7052 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
7053 goto out;
7054 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
7055 goto free_span;
7056 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
7057 goto free_online;
7058
7059 if (cpupri_init(&rd->cpupri, false) != 0)
7060 goto free_rto_mask;
7061 return 0;
7062
7063 free_rto_mask:
7064 free_cpumask_var(rd->rto_mask);
7065 free_online:
7066 free_cpumask_var(rd->online);
7067 free_span:
7068 free_cpumask_var(rd->span);
7069 out:
7070 return -ENOMEM;
7071 }
7072
7073 static void init_defrootdomain(void)
7074 {
7075 init_rootdomain(&def_root_domain, true);
7076
7077 atomic_set(&def_root_domain.refcount, 1);
7078 }
7079
7080 static struct root_domain *alloc_rootdomain(void)
7081 {
7082 struct root_domain *rd;
7083
7084 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
7085 if (!rd)
7086 return NULL;
7087
7088 if (init_rootdomain(rd, false) != 0) {
7089 kfree(rd);
7090 return NULL;
7091 }
7092
7093 return rd;
7094 }
7095
7096 /*
7097 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
7098 * hold the hotplug lock.
7099 */
7100 static void
7101 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
7102 {
7103 struct rq *rq = cpu_rq(cpu);
7104 struct sched_domain *tmp;
7105
7106 /* Remove the sched domains which do not contribute to scheduling. */
7107 for (tmp = sd; tmp; ) {
7108 struct sched_domain *parent = tmp->parent;
7109 if (!parent)
7110 break;
7111
7112 if (sd_parent_degenerate(tmp, parent)) {
7113 tmp->parent = parent->parent;
7114 if (parent->parent)
7115 parent->parent->child = tmp;
7116 } else
7117 tmp = tmp->parent;
7118 }
7119
7120 if (sd && sd_degenerate(sd)) {
7121 sd = sd->parent;
7122 if (sd)
7123 sd->child = NULL;
7124 }
7125
7126 sched_domain_debug(sd, cpu);
7127
7128 rq_attach_root(rq, rd);
7129 rcu_assign_pointer(rq->sd, sd);
7130 }
7131
7132 /* cpus with isolated domains */
7133 static cpumask_var_t cpu_isolated_map;
7134
7135 /* Setup the mask of cpus configured for isolated domains */
7136 static int __init isolated_cpu_setup(char *str)
7137 {
7138 cpulist_parse(str, cpu_isolated_map);
7139 return 1;
7140 }
7141
7142 __setup("isolcpus=", isolated_cpu_setup);
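
/*
 * Illustrative boot-line usage: "isolcpus=2,3" (or a range such as
 * "isolcpus=4-7") is parsed by cpulist_parse() into cpu_isolated_map.
 * Those CPUs are then left out of the scheduler domains built below, so
 * tasks only run there when placed by explicit affinity.
 */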
7143
7144 /*
7145 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
7146 * to a function which identifies what group (along with the sched group) a CPU
7147 * belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids
7148 * (due to the fact that we keep track of groups covered with a struct cpumask).
7149 *
7150 * init_sched_build_groups will build a circular linked list of the groups
7151 * covered by the given span, and will set each group's ->cpumask correctly,
7152 * and ->cpu_power to 0.
7153 */
7154 static void
7155 init_sched_build_groups(const struct cpumask *span,
7156 const struct cpumask *cpu_map,
7157 int (*group_fn)(int cpu, const struct cpumask *cpu_map,
7158 struct sched_group **sg,
7159 struct cpumask *tmpmask),
7160 struct cpumask *covered, struct cpumask *tmpmask)
7161 {
7162 struct sched_group *first = NULL, *last = NULL;
7163 int i;
7164
7165 cpumask_clear(covered);
7166
7167 for_each_cpu(i, span) {
7168 struct sched_group *sg;
7169 int group = group_fn(i, cpu_map, &sg, tmpmask);
7170 int j;
7171
7172 if (cpumask_test_cpu(i, covered))
7173 continue;
7174
7175 cpumask_clear(sched_group_cpus(sg));
7176 sg->__cpu_power = 0;
7177
7178 for_each_cpu(j, span) {
7179 if (group_fn(j, cpu_map, NULL, tmpmask) != group)
7180 continue;
7181
7182 cpumask_set_cpu(j, covered);
7183 cpumask_set_cpu(j, sched_group_cpus(sg));
7184 }
7185 if (!first)
7186 first = sg;
7187 if (last)
7188 last->next = sg;
7189 last = sg;
7190 }
7191 last->next = first;
7192 }
7193
7194 #define SD_NODES_PER_DOMAIN 16
7195
7196 #ifdef CONFIG_NUMA
7197
7198 /**
7199 * find_next_best_node - find the next node to include in a sched_domain
7200 * @node: node whose sched_domain we're building
7201 * @used_nodes: nodes already in the sched_domain
7202 *
7203 * Find the next node to include in a given scheduling domain. Simply
7204 * finds the closest node not already in the @used_nodes map.
7205 *
7206 * Should use nodemask_t.
7207 */
7208 static int find_next_best_node(int node, nodemask_t *used_nodes)
7209 {
7210 int i, n, val, min_val, best_node = 0;
7211
7212 min_val = INT_MAX;
7213
7214 for (i = 0; i < nr_node_ids; i++) {
7215 /* Start at @node */
7216 n = (node + i) % nr_node_ids;
7217
7218 if (!nr_cpus_node(n))
7219 continue;
7220
7221 /* Skip already used nodes */
7222 if (node_isset(n, *used_nodes))
7223 continue;
7224
7225 /* Simple min distance search */
7226 val = node_distance(node, n);
7227
7228 if (val < min_val) {
7229 min_val = val;
7230 best_node = n;
7231 }
7232 }
7233
7234 node_set(best_node, *used_nodes);
7235 return best_node;
7236 }
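
/*
 * Worked example (hypothetical distances): when building node 0's domain on
 * a 4-node box where node_distance(0, n) is 20, 40 and 10 for nodes 1, 2 and
 * 3, successive calls return node 3, then node 1, then node 2: each time the
 * closest node not yet in @used_nodes, which is what keeps the span growth
 * in sched_domain_node_span() locality-friendly.
 */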
7237
7238 /**
7239 * sched_domain_node_span - get a cpumask for a node's sched_domain
7240 * @node: node whose cpumask we're constructing
7241 * @span: resulting cpumask
7242 *
7243 * Given a node, construct a good cpumask for its sched_domain to span. It
7244 * should be one that prevents unnecessary balancing, but also spreads tasks
7245 * out optimally.
7246 */
7247 static void sched_domain_node_span(int node, struct cpumask *span)
7248 {
7249 nodemask_t used_nodes;
7250 int i;
7251
7252 cpumask_clear(span);
7253 nodes_clear(used_nodes);
7254
7255 cpumask_or(span, span, cpumask_of_node(node));
7256 node_set(node, used_nodes);
7257
7258 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
7259 int next_node = find_next_best_node(node, &used_nodes);
7260
7261 cpumask_or(span, span, cpumask_of_node(next_node));
7262 }
7263 }
7264 #endif /* CONFIG_NUMA */
7265
7266 int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
7267
7268 /*
7269 * The cpus mask in sched_group and sched_domain hangs off the end.
7270 * FIXME: use cpumask_var_t or dynamic percpu alloc to avoid wasting space
7271 * for nr_cpu_ids < CONFIG_NR_CPUS.
7272 */
7273 struct static_sched_group {
7274 struct sched_group sg;
7275 DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
7276 };
7277
7278 struct static_sched_domain {
7279 struct sched_domain sd;
7280 DECLARE_BITMAP(span, CONFIG_NR_CPUS);
7281 };
7282
7283 /*
7284 * SMT sched-domains:
7285 */
7286 #ifdef CONFIG_SCHED_SMT
7287 static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
7288 static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
7289
7290 static int
7291 cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
7292 struct sched_group **sg, struct cpumask *unused)
7293 {
7294 if (sg)
7295 *sg = &per_cpu(sched_group_cpus, cpu).sg;
7296 return cpu;
7297 }
7298 #endif /* CONFIG_SCHED_SMT */
7299
7300 /*
7301 * multi-core sched-domains:
7302 */
7303 #ifdef CONFIG_SCHED_MC
7304 static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
7305 static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
7306 #endif /* CONFIG_SCHED_MC */
7307
7308 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
7309 static int
7310 cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
7311 struct sched_group **sg, struct cpumask *mask)
7312 {
7313 int group;
7314
7315 cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
7316 group = cpumask_first(mask);
7317 if (sg)
7318 *sg = &per_cpu(sched_group_core, group).sg;
7319 return group;
7320 }
7321 #elif defined(CONFIG_SCHED_MC)
7322 static int
7323 cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
7324 struct sched_group **sg, struct cpumask *unused)
7325 {
7326 if (sg)
7327 *sg = &per_cpu(sched_group_core, cpu).sg;
7328 return cpu;
7329 }
7330 #endif
7331
7332 static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
7333 static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
7334
7335 static int
7336 cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
7337 struct sched_group **sg, struct cpumask *mask)
7338 {
7339 int group;
7340 #ifdef CONFIG_SCHED_MC
7341 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
7342 group = cpumask_first(mask);
7343 #elif defined(CONFIG_SCHED_SMT)
7344 cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
7345 group = cpumask_first(mask);
7346 #else
7347 group = cpu;
7348 #endif
7349 if (sg)
7350 *sg = &per_cpu(sched_group_phys, group).sg;
7351 return group;
7352 }
7353
7354 #ifdef CONFIG_NUMA
7355 /*
7356 * The init_sched_build_groups can't handle what we want to do with node
7357 * groups, so roll our own. Now each node has its own list of groups which
7358 * gets dynamically allocated.
7359 */
7360 static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
7361 static struct sched_group ***sched_group_nodes_bycpu;
7362
7363 static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
7364 static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
7365
7366 static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
7367 struct sched_group **sg,
7368 struct cpumask *nodemask)
7369 {
7370 int group;
7371
7372 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
7373 group = cpumask_first(nodemask);
7374
7375 if (sg)
7376 *sg = &per_cpu(sched_group_allnodes, group).sg;
7377 return group;
7378 }
7379
7380 static void init_numa_sched_groups_power(struct sched_group *group_head)
7381 {
7382 struct sched_group *sg = group_head;
7383 int j;
7384
7385 if (!sg)
7386 return;
7387 do {
7388 for_each_cpu(j, sched_group_cpus(sg)) {
7389 struct sched_domain *sd;
7390
7391 sd = &per_cpu(phys_domains, j).sd;
7392 if (j != cpumask_first(sched_group_cpus(sd->groups))) {
7393 /*
7394 * Only add "power" once for each
7395 * physical package.
7396 */
7397 continue;
7398 }
7399
7400 sg_inc_cpu_power(sg, sd->groups->__cpu_power);
7401 }
7402 sg = sg->next;
7403 } while (sg != group_head);
7404 }
7405 #endif /* CONFIG_NUMA */
7406
7407 #ifdef CONFIG_NUMA
7408 /* Free memory allocated for various sched_group structures */
7409 static void free_sched_groups(const struct cpumask *cpu_map,
7410 struct cpumask *nodemask)
7411 {
7412 int cpu, i;
7413
7414 for_each_cpu(cpu, cpu_map) {
7415 struct sched_group **sched_group_nodes
7416 = sched_group_nodes_bycpu[cpu];
7417
7418 if (!sched_group_nodes)
7419 continue;
7420
7421 for (i = 0; i < nr_node_ids; i++) {
7422 struct sched_group *oldsg, *sg = sched_group_nodes[i];
7423
7424 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
7425 if (cpumask_empty(nodemask))
7426 continue;
7427
7428 if (sg == NULL)
7429 continue;
7430 sg = sg->next;
7431 next_sg:
7432 oldsg = sg;
7433 sg = sg->next;
7434 kfree(oldsg);
7435 if (oldsg != sched_group_nodes[i])
7436 goto next_sg;
7437 }
7438 kfree(sched_group_nodes);
7439 sched_group_nodes_bycpu[cpu] = NULL;
7440 }
7441 }
7442 #else /* !CONFIG_NUMA */
7443 static void free_sched_groups(const struct cpumask *cpu_map,
7444 struct cpumask *nodemask)
7445 {
7446 }
7447 #endif /* CONFIG_NUMA */
7448
7449 /*
7450 * Initialize sched groups cpu_power.
7451 *
7452 * cpu_power indicates the capacity of sched group, which is used while
7453 * distributing the load between different sched groups in a sched domain.
7454 * Typically cpu_power for all the groups in a sched domain will be same unless
7455 * there are asymmetries in the topology. If there are asymmetries, group
7456 * having more cpu_power will pickup more load compared to the group having
7457 * less cpu_power.
7458 *
7459 * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
7460 * the maximum number of tasks a group can handle in the presence of other idle
7461 * or lightly loaded groups in the same sched domain.
7462 */
7463 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
7464 {
7465 struct sched_domain *child;
7466 struct sched_group *group;
7467
7468 WARN_ON(!sd || !sd->groups);
7469
7470 if (cpu != cpumask_first(sched_group_cpus(sd->groups)))
7471 return;
7472
7473 child = sd->child;
7474
7475 sd->groups->__cpu_power = 0;
7476
7477 /*
7478 * For perf policy, if the groups in child domain share resources
7479 * (for example cores sharing some portions of the cache hierarchy
7480  * or SMT), then set the cpu_power of this domain's groups such that each
7481  * group can handle only one task when there are other idle groups in the
7482 * same sched domain.
7483 */
7484 if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
7485 (child->flags &
7486 (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
7487 sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE);
7488 return;
7489 }
7490
7491 /*
7492  * Add the cpu_power of each child group to this group's cpu_power.
7493 */
7494 group = child->groups;
7495 do {
7496 sg_inc_cpu_power(sd->groups, group->__cpu_power);
7497 group = group->next;
7498 } while (group != child->groups);
7499 }
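/*
 * Worked example (illustrative, not from the original source): under the
 * default performance policy, a domain whose child groups share hardware
 * (SMT siblings, or cores sharing part of the cache) gets
 * __cpu_power = SCHED_LOAD_SCALE, i.e. "one task's worth".  A higher
 * domain whose children do not share resources takes the sum instead,
 * e.g. with two such child groups:
 *
 *	child group 0:	__cpu_power = SCHED_LOAD_SCALE
 *	child group 1:	__cpu_power = SCHED_LOAD_SCALE
 *	parent group:	__cpu_power = 2 * SCHED_LOAD_SCALE
 *
 * so the load balancer treats the parent group as capable of handling
 * two tasks.
 */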
7500
7501 /*
7502 * Initializers for schedule domains
7503 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
7504 */
7505
7506 #ifdef CONFIG_SCHED_DEBUG
7507 # define SD_INIT_NAME(sd, type) sd->name = #type
7508 #else
7509 # define SD_INIT_NAME(sd, type) do { } while (0)
7510 #endif
7511
7512 #define SD_INIT(sd, type) sd_init_##type(sd)
7513
7514 #define SD_INIT_FUNC(type) \
7515 static noinline void sd_init_##type(struct sched_domain *sd) \
7516 { \
7517 memset(sd, 0, sizeof(*sd)); \
7518 *sd = SD_##type##_INIT; \
7519 sd->level = SD_LV_##type; \
7520 SD_INIT_NAME(sd, type); \
7521 }
7522
7523 SD_INIT_FUNC(CPU)
7524 #ifdef CONFIG_NUMA
7525 SD_INIT_FUNC(ALLNODES)
7526 SD_INIT_FUNC(NODE)
7527 #endif
7528 #ifdef CONFIG_SCHED_SMT
7529 SD_INIT_FUNC(SIBLING)
7530 #endif
7531 #ifdef CONFIG_SCHED_MC
7532 SD_INIT_FUNC(MC)
7533 #endif
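/*
 * For illustration only: with CONFIG_SCHED_DEBUG enabled, SD_INIT_FUNC(CPU)
 * above expands to roughly
 *
 *	static noinline void sd_init_CPU(struct sched_domain *sd)
 *	{
 *		memset(sd, 0, sizeof(*sd));
 *		*sd = SD_CPU_INIT;
 *		sd->level = SD_LV_CPU;
 *		sd->name = "CPU";
 *	}
 *
 * (without CONFIG_SCHED_DEBUG the name assignment disappears), and a later
 * SD_INIT(sd, CPU) is just a call to sd_init_CPU(sd).
 */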
7534
7535 static int default_relax_domain_level = -1;
7536
7537 static int __init setup_relax_domain_level(char *str)
7538 {
7539 unsigned long val;
7540
7541 val = simple_strtoul(str, NULL, 0);
7542 if (val < SD_LV_MAX)
7543 default_relax_domain_level = val;
7544
7545 return 1;
7546 }
7547 __setup("relax_domain_level=", setup_relax_domain_level);
7548
7549 static void set_domain_attribute(struct sched_domain *sd,
7550 struct sched_domain_attr *attr)
7551 {
7552 int request;
7553
7554 if (!attr || attr->relax_domain_level < 0) {
7555 if (default_relax_domain_level < 0)
7556 return;
7557 else
7558 request = default_relax_domain_level;
7559 } else
7560 request = attr->relax_domain_level;
7561 if (request < sd->level) {
7562 /* turn off idle balance on this domain */
7563 sd->flags &= ~(SD_WAKE_IDLE|SD_BALANCE_NEWIDLE);
7564 } else {
7565 /* turn on idle balance on this domain */
7566 sd->flags |= (SD_WAKE_IDLE_FAR|SD_BALANCE_NEWIDLE);
7567 }
7568 }
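/*
 * Example (illustrative): booting with "relax_domain_level=2" makes
 * set_domain_attribute() set SD_WAKE_IDLE_FAR/SD_BALANCE_NEWIDLE on
 * domains whose level is at most 2 and clear SD_WAKE_IDLE/
 * SD_BALANCE_NEWIDLE on the higher (larger) domains, unless a cpuset
 * supplies its own relax_domain_level attribute.
 */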
7569
7570 /*
7571 * Build sched domains for a given set of cpus and attach the sched domains
7572 * to the individual cpus
7573 */
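/*
 * The per-CPU hierarchy built below looks like this, lowest level first
 * (a sketch; which levels exist depends on the config and the topology):
 *
 *	SIBLING (SMT threads) -> MC (cores in a package)
 *		-> CPU (CPUs in a node) -> NODE -> ALLNODES
 *
 * sd->parent points up this chain and sd->child points down it.
 */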
7574 static int __build_sched_domains(const struct cpumask *cpu_map,
7575 struct sched_domain_attr *attr)
7576 {
7577 int i, err = -ENOMEM;
7578 struct root_domain *rd;
7579 cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered,
7580 tmpmask;
7581 #ifdef CONFIG_NUMA
7582 cpumask_var_t domainspan, covered, notcovered;
7583 struct sched_group **sched_group_nodes = NULL;
7584 int sd_allnodes = 0;
7585
7586 if (!alloc_cpumask_var(&domainspan, GFP_KERNEL))
7587 goto out;
7588 if (!alloc_cpumask_var(&covered, GFP_KERNEL))
7589 goto free_domainspan;
7590 if (!alloc_cpumask_var(&notcovered, GFP_KERNEL))
7591 goto free_covered;
7592 #endif
7593
7594 if (!alloc_cpumask_var(&nodemask, GFP_KERNEL))
7595 goto free_notcovered;
7596 if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL))
7597 goto free_nodemask;
7598 if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL))
7599 goto free_this_sibling_map;
7600 if (!alloc_cpumask_var(&send_covered, GFP_KERNEL))
7601 goto free_this_core_map;
7602 if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
7603 goto free_send_covered;
7604
7605 #ifdef CONFIG_NUMA
7606 /*
7607 * Allocate the per-node list of sched groups
7608 */
7609 sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
7610 GFP_KERNEL);
7611 if (!sched_group_nodes) {
7612 printk(KERN_WARNING "Can not alloc sched group node list\n");
7613 goto free_tmpmask;
7614 }
7615 #endif
7616
7617 rd = alloc_rootdomain();
7618 if (!rd) {
7619 printk(KERN_WARNING "Cannot alloc root domain\n");
7620 goto free_sched_groups;
7621 }
7622
7623 #ifdef CONFIG_NUMA
7624 sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes;
7625 #endif
7626
7627 /*
7628 * Set up domains for cpus specified by the cpu_map.
7629 */
7630 for_each_cpu(i, cpu_map) {
7631 struct sched_domain *sd = NULL, *p;
7632
7633 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);
7634
7635 #ifdef CONFIG_NUMA
7636 if (cpumask_weight(cpu_map) >
7637 SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
7638 sd = &per_cpu(allnodes_domains, i).sd;
7639 SD_INIT(sd, ALLNODES);
7640 set_domain_attribute(sd, attr);
7641 cpumask_copy(sched_domain_span(sd), cpu_map);
7642 cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
7643 p = sd;
7644 sd_allnodes = 1;
7645 } else
7646 p = NULL;
7647
7648 sd = &per_cpu(node_domains, i).sd;
7649 SD_INIT(sd, NODE);
7650 set_domain_attribute(sd, attr);
7651 sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
7652 sd->parent = p;
7653 if (p)
7654 p->child = sd;
7655 cpumask_and(sched_domain_span(sd),
7656 sched_domain_span(sd), cpu_map);
7657 #endif
7658
7659 p = sd;
7660 sd = &per_cpu(phys_domains, i).sd;
7661 SD_INIT(sd, CPU);
7662 set_domain_attribute(sd, attr);
7663 cpumask_copy(sched_domain_span(sd), nodemask);
7664 sd->parent = p;
7665 if (p)
7666 p->child = sd;
7667 cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask);
7668
7669 #ifdef CONFIG_SCHED_MC
7670 p = sd;
7671 sd = &per_cpu(core_domains, i).sd;
7672 SD_INIT(sd, MC);
7673 set_domain_attribute(sd, attr);
7674 cpumask_and(sched_domain_span(sd), cpu_map,
7675 cpu_coregroup_mask(i));
7676 sd->parent = p;
7677 p->child = sd;
7678 cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
7679 #endif
7680
7681 #ifdef CONFIG_SCHED_SMT
7682 p = sd;
7683 sd = &per_cpu(cpu_domains, i).sd;
7684 SD_INIT(sd, SIBLING);
7685 set_domain_attribute(sd, attr);
7686 cpumask_and(sched_domain_span(sd),
7687 &per_cpu(cpu_sibling_map, i), cpu_map);
7688 sd->parent = p;
7689 p->child = sd;
7690 cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
7691 #endif
7692 }
7693
7694 #ifdef CONFIG_SCHED_SMT
7695 /* Set up CPU (sibling) groups */
7696 for_each_cpu(i, cpu_map) {
7697 cpumask_and(this_sibling_map,
7698 &per_cpu(cpu_sibling_map, i), cpu_map);
7699 if (i != cpumask_first(this_sibling_map))
7700 continue;
7701
7702 init_sched_build_groups(this_sibling_map, cpu_map,
7703 &cpu_to_cpu_group,
7704 send_covered, tmpmask);
7705 }
7706 #endif
7707
7708 #ifdef CONFIG_SCHED_MC
7709 /* Set up multi-core groups */
7710 for_each_cpu(i, cpu_map) {
7711 cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map);
7712 if (i != cpumask_first(this_core_map))
7713 continue;
7714
7715 init_sched_build_groups(this_core_map, cpu_map,
7716 &cpu_to_core_group,
7717 send_covered, tmpmask);
7718 }
7719 #endif
7720
7721 /* Set up physical groups */
7722 for (i = 0; i < nr_node_ids; i++) {
7723 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
7724 if (cpumask_empty(nodemask))
7725 continue;
7726
7727 init_sched_build_groups(nodemask, cpu_map,
7728 &cpu_to_phys_group,
7729 send_covered, tmpmask);
7730 }
7731
7732 #ifdef CONFIG_NUMA
7733 /* Set up node groups */
7734 if (sd_allnodes) {
7735 init_sched_build_groups(cpu_map, cpu_map,
7736 &cpu_to_allnodes_group,
7737 send_covered, tmpmask);
7738 }
7739
7740 for (i = 0; i < nr_node_ids; i++) {
7741 /* Set up node groups */
7742 struct sched_group *sg, *prev;
7743 int j;
7744
7745 cpumask_clear(covered);
7746 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
7747 if (cpumask_empty(nodemask)) {
7748 sched_group_nodes[i] = NULL;
7749 continue;
7750 }
7751
7752 sched_domain_node_span(i, domainspan);
7753 cpumask_and(domainspan, domainspan, cpu_map);
7754
7755 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
7756 GFP_KERNEL, i);
7757 if (!sg) {
7758 printk(KERN_WARNING "Can not alloc domain group for "
7759 "node %d\n", i);
7760 goto error;
7761 }
7762 sched_group_nodes[i] = sg;
7763 for_each_cpu(j, nodemask) {
7764 struct sched_domain *sd;
7765
7766 sd = &per_cpu(node_domains, j).sd;
7767 sd->groups = sg;
7768 }
7769 sg->__cpu_power = 0;
7770 cpumask_copy(sched_group_cpus(sg), nodemask);
7771 sg->next = sg;
7772 cpumask_or(covered, covered, nodemask);
7773 prev = sg;
7774
7775 for (j = 0; j < nr_node_ids; j++) {
7776 int n = (i + j) % nr_node_ids;
7777
7778 cpumask_complement(notcovered, covered);
7779 cpumask_and(tmpmask, notcovered, cpu_map);
7780 cpumask_and(tmpmask, tmpmask, domainspan);
7781 if (cpumask_empty(tmpmask))
7782 break;
7783
7784 cpumask_and(tmpmask, tmpmask, cpumask_of_node(n));
7785 if (cpumask_empty(tmpmask))
7786 continue;
7787
7788 sg = kmalloc_node(sizeof(struct sched_group) +
7789 cpumask_size(),
7790 GFP_KERNEL, i);
7791 if (!sg) {
7792 printk(KERN_WARNING
7793 "Can not alloc domain group for node %d\n", j);
7794 goto error;
7795 }
7796 sg->__cpu_power = 0;
7797 cpumask_copy(sched_group_cpus(sg), tmpmask);
7798 sg->next = prev->next;
7799 cpumask_or(covered, covered, tmpmask);
7800 prev->next = sg;
7801 prev = sg;
7802 }
7803 }
7804 #endif
7805
7806 /* Calculate CPU power for physical packages and nodes */
7807 #ifdef CONFIG_SCHED_SMT
7808 for_each_cpu(i, cpu_map) {
7809 struct sched_domain *sd = &per_cpu(cpu_domains, i).sd;
7810
7811 init_sched_groups_power(i, sd);
7812 }
7813 #endif
7814 #ifdef CONFIG_SCHED_MC
7815 for_each_cpu(i, cpu_map) {
7816 struct sched_domain *sd = &per_cpu(core_domains, i).sd;
7817
7818 init_sched_groups_power(i, sd);
7819 }
7820 #endif
7821
7822 for_each_cpu(i, cpu_map) {
7823 struct sched_domain *sd = &per_cpu(phys_domains, i).sd;
7824
7825 init_sched_groups_power(i, sd);
7826 }
7827
7828 #ifdef CONFIG_NUMA
7829 for (i = 0; i < nr_node_ids; i++)
7830 init_numa_sched_groups_power(sched_group_nodes[i]);
7831
7832 if (sd_allnodes) {
7833 struct sched_group *sg;
7834
7835 cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
7836 tmpmask);
7837 init_numa_sched_groups_power(sg);
7838 }
7839 #endif
7840
7841 /* Attach the domains */
7842 for_each_cpu(i, cpu_map) {
7843 struct sched_domain *sd;
7844 #ifdef CONFIG_SCHED_SMT
7845 sd = &per_cpu(cpu_domains, i).sd;
7846 #elif defined(CONFIG_SCHED_MC)
7847 sd = &per_cpu(core_domains, i).sd;
7848 #else
7849 sd = &per_cpu(phys_domains, i).sd;
7850 #endif
7851 cpu_attach_domain(sd, rd, i);
7852 }
7853
7854 err = 0;
7855
7856 free_tmpmask:
7857 free_cpumask_var(tmpmask);
7858 free_send_covered:
7859 free_cpumask_var(send_covered);
7860 free_this_core_map:
7861 free_cpumask_var(this_core_map);
7862 free_this_sibling_map:
7863 free_cpumask_var(this_sibling_map);
7864 free_nodemask:
7865 free_cpumask_var(nodemask);
7866 free_notcovered:
7867 #ifdef CONFIG_NUMA
7868 free_cpumask_var(notcovered);
7869 free_covered:
7870 free_cpumask_var(covered);
7871 free_domainspan:
7872 free_cpumask_var(domainspan);
7873 out:
7874 #endif
7875 return err;
7876
7877 free_sched_groups:
7878 #ifdef CONFIG_NUMA
7879 kfree(sched_group_nodes);
7880 #endif
7881 goto free_tmpmask;
7882
7883 #ifdef CONFIG_NUMA
7884 error:
7885 free_sched_groups(cpu_map, tmpmask);
7886 free_rootdomain(rd);
7887 goto free_tmpmask;
7888 #endif
7889 }
7890
7891 static int build_sched_domains(const struct cpumask *cpu_map)
7892 {
7893 return __build_sched_domains(cpu_map, NULL);
7894 }
7895
7896 static struct cpumask *doms_cur; /* current sched domains */
7897 static int ndoms_cur; /* number of sched domains in 'doms_cur' */
7898 static struct sched_domain_attr *dattr_cur;
7899 /* attributes of custom domains in 'doms_cur' */
7900
7901 /*
7902 * Special case: If a kmalloc of a doms_cur partition (array of
7903 * cpumask) fails, then fallback to a single sched domain,
7904 * as determined by the single cpumask fallback_doms.
7905 */
7906 static cpumask_var_t fallback_doms;
7907
7908 /*
7909 * arch_update_cpu_topology lets virtualized architectures update the
7910 * cpu core maps. It is supposed to return 1 if the topology changed
7911 * or 0 if it stayed the same.
7912 */
7913 int __attribute__((weak)) arch_update_cpu_topology(void)
7914 {
7915 return 0;
7916 }
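/*
 * A virtualization-aware architecture provides a strong definition of this
 * hook elsewhere; a minimal sketch of such an override (hypothetical,
 * refresh_core_maps_from_hypervisor() is made up for illustration):
 *
 *	int arch_update_cpu_topology(void)
 *	{
 *		return refresh_core_maps_from_hypervisor() ? 1 : 0;
 *	}
 */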
7917
7918 /*
7919 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
7920 * For now this just excludes isolated cpus, but could be used to
7921 * exclude other special cases in the future.
7922 */
7923 static int arch_init_sched_domains(const struct cpumask *cpu_map)
7924 {
7925 int err;
7926
7927 arch_update_cpu_topology();
7928 ndoms_cur = 1;
7929 doms_cur = kmalloc(cpumask_size(), GFP_KERNEL);
7930 if (!doms_cur)
7931 doms_cur = fallback_doms;
7932 cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
7933 dattr_cur = NULL;
7934 err = build_sched_domains(doms_cur);
7935 register_sched_domain_sysctl();
7936
7937 return err;
7938 }
7939
7940 static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
7941 struct cpumask *tmpmask)
7942 {
7943 free_sched_groups(cpu_map, tmpmask);
7944 }
7945
7946 /*
7947 * Detach sched domains from a group of cpus specified in cpu_map
7948 * These cpus will now be attached to the NULL domain
7949 */
7950 static void detach_destroy_domains(const struct cpumask *cpu_map)
7951 {
7952 /* Static (stays off the stack); safe because the hotplug lock is held. */
7953 static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS);
7954 int i;
7955
7956 for_each_cpu(i, cpu_map)
7957 cpu_attach_domain(NULL, &def_root_domain, i);
7958 synchronize_sched();
7959 arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
7960 }
7961
7962 /* handle null as "default" */
7963 static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7964 struct sched_domain_attr *new, int idx_new)
7965 {
7966 struct sched_domain_attr tmp;
7967
7968 /* fast path */
7969 if (!new && !cur)
7970 return 1;
7971
7972 tmp = SD_ATTR_INIT;
7973 return !memcmp(cur ? (cur + idx_cur) : &tmp,
7974 new ? (new + idx_new) : &tmp,
7975 sizeof(struct sched_domain_attr));
7976 }
7977
7978 /*
7979 * Partition sched domains as specified by the 'ndoms_new'
7980 * cpumasks in the array doms_new[] of cpumasks. This compares
7981 * doms_new[] to the current sched domain partitioning, doms_cur[].
7982 * It destroys each deleted domain and builds each new domain.
7983 *
7984 * 'doms_new' is an array of cpumask's of length 'ndoms_new'.
7985  * The masks don't intersect (don't overlap). We should set up one
7986 * sched domain for each mask. CPUs not in any of the cpumasks will
7987 * not be load balanced. If the same cpumask appears both in the
7988 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7989 * it as it is.
7990 *
7991 * The passed in 'doms_new' should be kmalloc'd. This routine takes
7992 * ownership of it and will kfree it when done with it. If the caller
7993 * failed the kmalloc call, then it can pass in doms_new == NULL &&
7994 * ndoms_new == 1, and partition_sched_domains() will fallback to
7995 * the single partition 'fallback_doms', it also forces the domains
7996 * to be rebuilt.
7997 *
7998 * If doms_new == NULL it will be replaced with cpu_online_mask.
7999 * ndoms_new == 0 is a special case for destroying existing domains,
8000 * and it will not create the default domain.
8001 *
8002 * Call with hotplug lock held
8003 */
8004 /* FIXME: Change to struct cpumask *doms_new[] */
8005 void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
8006 struct sched_domain_attr *dattr_new)
8007 {
8008 int i, j, n;
8009 int new_topology;
8010
8011 mutex_lock(&sched_domains_mutex);
8012
8013 /* always unregister in case we don't destroy any domains */
8014 unregister_sched_domain_sysctl();
8015
8016 /* Let architecture update cpu core mappings. */
8017 new_topology = arch_update_cpu_topology();
8018
8019 n = doms_new ? ndoms_new : 0;
8020
8021 /* Destroy deleted domains */
8022 for (i = 0; i < ndoms_cur; i++) {
8023 for (j = 0; j < n && !new_topology; j++) {
8024 if (cpumask_equal(&doms_cur[i], &doms_new[j])
8025 && dattrs_equal(dattr_cur, i, dattr_new, j))
8026 goto match1;
8027 }
8028 /* no match - a current sched domain not in new doms_new[] */
8029 detach_destroy_domains(doms_cur + i);
8030 match1:
8031 ;
8032 }
8033
8034 if (doms_new == NULL) {
8035 ndoms_cur = 0;
8036 doms_new = fallback_doms;
8037 cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
8038 WARN_ON_ONCE(dattr_new);
8039 }
8040
8041 /* Build new domains */
8042 for (i = 0; i < ndoms_new; i++) {
8043 for (j = 0; j < ndoms_cur && !new_topology; j++) {
8044 if (cpumask_equal(&doms_new[i], &doms_cur[j])
8045 && dattrs_equal(dattr_new, i, dattr_cur, j))
8046 goto match2;
8047 }
8048 /* no match - add a new doms_new */
8049 __build_sched_domains(doms_new + i,
8050 dattr_new ? dattr_new + i : NULL);
8051 match2:
8052 ;
8053 }
8054
8055 /* Remember the new sched domains */
8056 if (doms_cur != fallback_doms)
8057 kfree(doms_cur);
8058 kfree(dattr_cur); /* kfree(NULL) is safe */
8059 doms_cur = doms_new;
8060 dattr_cur = dattr_new;
8061 ndoms_cur = ndoms_new;
8062
8063 register_sched_domain_sysctl();
8064
8065 mutex_unlock(&sched_domains_mutex);
8066 }
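/*
 * Usage sketch (illustrative only): a caller rebuilding two disjoint
 * partitions might do roughly
 *
 *	struct cpumask *doms = kmalloc(2 * sizeof(struct cpumask), GFP_KERNEL);
 *
 *	cpumask_copy(&doms[0], set_a);
 *	cpumask_copy(&doms[1], set_b);
 *	partition_sched_domains(2, doms, NULL);
 *
 * after which partition_sched_domains() owns 'doms' and will kfree() it
 * on the next repartition.
 */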
8067
8068 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
8069 static void arch_reinit_sched_domains(void)
8070 {
8071 get_online_cpus();
8072
8073 /* Destroy domains first to force the rebuild */
8074 partition_sched_domains(0, NULL, NULL);
8075
8076 rebuild_sched_domains();
8077 put_online_cpus();
8078 }
8079
8080 static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
8081 {
8082 unsigned int level = 0;
8083
8084 if (sscanf(buf, "%u", &level) != 1)
8085 return -EINVAL;
8086
8087 /*
8088  * level is always positive, so there is no need to check for
8089  * level < POWERSAVINGS_BALANCE_NONE, which is 0.
8090  * What happens on a 0 or 1 byte write? Do we need to check
8091  * count as well?
8092 */
8093
8094 if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
8095 return -EINVAL;
8096
8097 if (smt)
8098 sched_smt_power_savings = level;
8099 else
8100 sched_mc_power_savings = level;
8101
8102 arch_reinit_sched_domains();
8103
8104 return count;
8105 }
8106
8107 #ifdef CONFIG_SCHED_MC
8108 static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
8109 char *page)
8110 {
8111 return sprintf(page, "%u\n", sched_mc_power_savings);
8112 }
8113 static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
8114 const char *buf, size_t count)
8115 {
8116 return sched_power_savings_store(buf, count, 0);
8117 }
8118 static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
8119 sched_mc_power_savings_show,
8120 sched_mc_power_savings_store);
8121 #endif
8122
8123 #ifdef CONFIG_SCHED_SMT
8124 static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
8125 char *page)
8126 {
8127 return sprintf(page, "%u\n", sched_smt_power_savings);
8128 }
8129 static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
8130 const char *buf, size_t count)
8131 {
8132 return sched_power_savings_store(buf, count, 1);
8133 }
8134 static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
8135 sched_smt_power_savings_show,
8136 sched_smt_power_savings_store);
8137 #endif
8138
8139 int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
8140 {
8141 int err = 0;
8142
8143 #ifdef CONFIG_SCHED_SMT
8144 if (smt_capable())
8145 err = sysfs_create_file(&cls->kset.kobj,
8146 &attr_sched_smt_power_savings.attr);
8147 #endif
8148 #ifdef CONFIG_SCHED_MC
8149 if (!err && mc_capable())
8150 err = sysfs_create_file(&cls->kset.kobj,
8151 &attr_sched_mc_power_savings.attr);
8152 #endif
8153 return err;
8154 }
8155 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
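/*
 * The attributes above normally show up under /sys/devices/system/cpu/;
 * e.g. (illustrative)
 *
 *	# echo 1 > /sys/devices/system/cpu/sched_mc_power_savings
 *
 * raises the multi-core power-savings level, which goes through
 * sched_power_savings_store() and forces a full domain rebuild via
 * arch_reinit_sched_domains().
 */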
8156
8157 #ifndef CONFIG_CPUSETS
8158 /*
8159 * Add online and remove offline CPUs from the scheduler domains.
8160 * When cpusets are enabled they take over this function.
8161 */
8162 static int update_sched_domains(struct notifier_block *nfb,
8163 unsigned long action, void *hcpu)
8164 {
8165 switch (action) {
8166 case CPU_ONLINE:
8167 case CPU_ONLINE_FROZEN:
8168 case CPU_DEAD:
8169 case CPU_DEAD_FROZEN:
8170 partition_sched_domains(1, NULL, NULL);
8171 return NOTIFY_OK;
8172
8173 default:
8174 return NOTIFY_DONE;
8175 }
8176 }
8177 #endif
8178
8179 static int update_runtime(struct notifier_block *nfb,
8180 unsigned long action, void *hcpu)
8181 {
8182 int cpu = (int)(long)hcpu;
8183
8184 switch (action) {
8185 case CPU_DOWN_PREPARE:
8186 case CPU_DOWN_PREPARE_FROZEN:
8187 disable_runtime(cpu_rq(cpu));
8188 return NOTIFY_OK;
8189
8190 case CPU_DOWN_FAILED:
8191 case CPU_DOWN_FAILED_FROZEN:
8192 case CPU_ONLINE:
8193 case CPU_ONLINE_FROZEN:
8194 enable_runtime(cpu_rq(cpu));
8195 return NOTIFY_OK;
8196
8197 default:
8198 return NOTIFY_DONE;
8199 }
8200 }
8201
8202 void __init sched_init_smp(void)
8203 {
8204 cpumask_var_t non_isolated_cpus;
8205
8206 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
8207
8208 #if defined(CONFIG_NUMA)
8209 sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
8210 GFP_KERNEL);
8211 BUG_ON(sched_group_nodes_bycpu == NULL);
8212 #endif
8213 get_online_cpus();
8214 mutex_lock(&sched_domains_mutex);
8215 arch_init_sched_domains(cpu_online_mask);
8216 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
8217 if (cpumask_empty(non_isolated_cpus))
8218 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
8219 mutex_unlock(&sched_domains_mutex);
8220 put_online_cpus();
8221
8222 #ifndef CONFIG_CPUSETS
8223 /* XXX: Theoretical race here - CPU may be hotplugged now */
8224 hotcpu_notifier(update_sched_domains, 0);
8225 #endif
8226
8227 /* RT runtime code needs to handle some hotplug events */
8228 hotcpu_notifier(update_runtime, 0);
8229
8230 init_hrtick();
8231
8232 /* Move init over to a non-isolated CPU */
8233 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
8234 BUG();
8235 sched_init_granularity();
8236 free_cpumask_var(non_isolated_cpus);
8237
8238 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
8239 init_sched_rt_class();
8240 }
8241 #else
8242 void __init sched_init_smp(void)
8243 {
8244 sched_init_granularity();
8245 }
8246 #endif /* CONFIG_SMP */
8247
8248 int in_sched_functions(unsigned long addr)
8249 {
8250 return in_lock_functions(addr) ||
8251 (addr >= (unsigned long)__sched_text_start
8252 && addr < (unsigned long)__sched_text_end);
8253 }
8254
8255 static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
8256 {
8257 cfs_rq->tasks_timeline = RB_ROOT;
8258 INIT_LIST_HEAD(&cfs_rq->tasks);
8259 #ifdef CONFIG_FAIR_GROUP_SCHED
8260 cfs_rq->rq = rq;
8261 #endif
8262 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
8263 }
8264
8265 static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
8266 {
8267 struct rt_prio_array *array;
8268 int i;
8269
8270 array = &rt_rq->active;
8271 for (i = 0; i < MAX_RT_PRIO; i++) {
8272 INIT_LIST_HEAD(array->queue + i);
8273 __clear_bit(i, array->bitmap);
8274 }
8275 /* delimiter for bitsearch: */
8276 __set_bit(MAX_RT_PRIO, array->bitmap);
8277
8278 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
8279 rt_rq->highest_prio = MAX_RT_PRIO;
8280 #endif
8281 #ifdef CONFIG_SMP
8282 rt_rq->rt_nr_migratory = 0;
8283 rt_rq->overloaded = 0;
8284 #endif
8285
8286 rt_rq->rt_time = 0;
8287 rt_rq->rt_throttled = 0;
8288 rt_rq->rt_runtime = 0;
8289 spin_lock_init(&rt_rq->rt_runtime_lock);
8290
8291 #ifdef CONFIG_RT_GROUP_SCHED
8292 rt_rq->rt_nr_boosted = 0;
8293 rt_rq->rq = rq;
8294 #endif
8295 }
8296
8297 #ifdef CONFIG_FAIR_GROUP_SCHED
8298 static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
8299 struct sched_entity *se, int cpu, int add,
8300 struct sched_entity *parent)
8301 {
8302 struct rq *rq = cpu_rq(cpu);
8303 tg->cfs_rq[cpu] = cfs_rq;
8304 init_cfs_rq(cfs_rq, rq);
8305 cfs_rq->tg = tg;
8306 if (add)
8307 list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
8308
8309 tg->se[cpu] = se;
8310 /* se could be NULL for init_task_group */
8311 if (!se)
8312 return;
8313
8314 if (!parent)
8315 se->cfs_rq = &rq->cfs;
8316 else
8317 se->cfs_rq = parent->my_q;
8318
8319 se->my_q = cfs_rq;
8320 se->load.weight = tg->shares;
8321 se->load.inv_weight = 0;
8322 se->parent = parent;
8323 }
8324 #endif
8325
8326 #ifdef CONFIG_RT_GROUP_SCHED
8327 static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
8328 struct sched_rt_entity *rt_se, int cpu, int add,
8329 struct sched_rt_entity *parent)
8330 {
8331 struct rq *rq = cpu_rq(cpu);
8332
8333 tg->rt_rq[cpu] = rt_rq;
8334 init_rt_rq(rt_rq, rq);
8335 rt_rq->tg = tg;
8336 rt_rq->rt_se = rt_se;
8337 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
8338 if (add)
8339 list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
8340
8341 tg->rt_se[cpu] = rt_se;
8342 if (!rt_se)
8343 return;
8344
8345 if (!parent)
8346 rt_se->rt_rq = &rq->rt;
8347 else
8348 rt_se->rt_rq = parent->my_q;
8349
8350 rt_se->my_q = rt_rq;
8351 rt_se->parent = parent;
8352 INIT_LIST_HEAD(&rt_se->run_list);
8353 }
8354 #endif
8355
8356 void __init sched_init(void)
8357 {
8358 int i, j;
8359 unsigned long alloc_size = 0, ptr;
8360
8361 #ifdef CONFIG_FAIR_GROUP_SCHED
8362 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
8363 #endif
8364 #ifdef CONFIG_RT_GROUP_SCHED
8365 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
8366 #endif
8367 #ifdef CONFIG_USER_SCHED
8368 alloc_size *= 2;
8369 #endif
8370 /*
8371  * As sched_init() is called before page_alloc is set up,
8372 * we use alloc_bootmem().
8373 */
8374 if (alloc_size) {
8375 ptr = (unsigned long)alloc_bootmem(alloc_size);
8376
8377 #ifdef CONFIG_FAIR_GROUP_SCHED
8378 init_task_group.se = (struct sched_entity **)ptr;
8379 ptr += nr_cpu_ids * sizeof(void **);
8380
8381 init_task_group.cfs_rq = (struct cfs_rq **)ptr;
8382 ptr += nr_cpu_ids * sizeof(void **);
8383
8384 #ifdef CONFIG_USER_SCHED
8385 root_task_group.se = (struct sched_entity **)ptr;
8386 ptr += nr_cpu_ids * sizeof(void **);
8387
8388 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
8389 ptr += nr_cpu_ids * sizeof(void **);
8390 #endif /* CONFIG_USER_SCHED */
8391 #endif /* CONFIG_FAIR_GROUP_SCHED */
8392 #ifdef CONFIG_RT_GROUP_SCHED
8393 init_task_group.rt_se = (struct sched_rt_entity **)ptr;
8394 ptr += nr_cpu_ids * sizeof(void **);
8395
8396 init_task_group.rt_rq = (struct rt_rq **)ptr;
8397 ptr += nr_cpu_ids * sizeof(void **);
8398
8399 #ifdef CONFIG_USER_SCHED
8400 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
8401 ptr += nr_cpu_ids * sizeof(void **);
8402
8403 root_task_group.rt_rq = (struct rt_rq **)ptr;
8404 ptr += nr_cpu_ids * sizeof(void **);
8405 #endif /* CONFIG_USER_SCHED */
8406 #endif /* CONFIG_RT_GROUP_SCHED */
8407 }
8408
8409 #ifdef CONFIG_SMP
8410 init_defrootdomain();
8411 #endif
8412
8413 init_rt_bandwidth(&def_rt_bandwidth,
8414 global_rt_period(), global_rt_runtime());
8415
8416 #ifdef CONFIG_RT_GROUP_SCHED
8417 init_rt_bandwidth(&init_task_group.rt_bandwidth,
8418 global_rt_period(), global_rt_runtime());
8419 #ifdef CONFIG_USER_SCHED
8420 init_rt_bandwidth(&root_task_group.rt_bandwidth,
8421 global_rt_period(), RUNTIME_INF);
8422 #endif /* CONFIG_USER_SCHED */
8423 #endif /* CONFIG_RT_GROUP_SCHED */
8424
8425 #ifdef CONFIG_GROUP_SCHED
8426 list_add(&init_task_group.list, &task_groups);
8427 INIT_LIST_HEAD(&init_task_group.children);
8428
8429 #ifdef CONFIG_USER_SCHED
8430 INIT_LIST_HEAD(&root_task_group.children);
8431 init_task_group.parent = &root_task_group;
8432 list_add(&init_task_group.siblings, &root_task_group.children);
8433 #endif /* CONFIG_USER_SCHED */
8434 #endif /* CONFIG_GROUP_SCHED */
8435
8436 for_each_possible_cpu(i) {
8437 struct rq *rq;
8438
8439 rq = cpu_rq(i);
8440 spin_lock_init(&rq->lock);
8441 rq->nr_running = 0;
8442 init_cfs_rq(&rq->cfs, rq);
8443 init_rt_rq(&rq->rt, rq);
8444 #ifdef CONFIG_FAIR_GROUP_SCHED
8445 init_task_group.shares = init_task_group_load;
8446 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
8447 #ifdef CONFIG_CGROUP_SCHED
8448 /*
8449 * How much cpu bandwidth does init_task_group get?
8450 *
8451 * In case of task-groups formed thr' the cgroup filesystem, it
8452 * gets 100% of the cpu resources in the system. This overall
8453 * system cpu resource is divided among the tasks of
8454 * init_task_group and its child task-groups in a fair manner,
8455 * based on each entity's (task or task-group's) weight
8456 * (se->load.weight).
8457 *
8458  * In other words, if init_task_group has 10 tasks (each of weight
8459  * 1024) and two child groups A0 and A1 (of weight 1024 each),
8460 * then A0's share of the cpu resource is:
8461 *
8462 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
8463 *
8464 * We achieve this by letting init_task_group's tasks sit
8465 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
8466 */
8467 init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
8468 #elif defined CONFIG_USER_SCHED
8469 root_task_group.shares = NICE_0_LOAD;
8470 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
8471 /*
8472 * In case of task-groups formed thr' the user id of tasks,
8473 * init_task_group represents tasks belonging to root user.
8474 * Hence it forms a sibling of all subsequent groups formed.
8475 * In this case, init_task_group gets only a fraction of overall
8476 * system cpu resource, based on the weight assigned to root
8477 * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished
8478 * by letting tasks of init_task_group sit in a separate cfs_rq
8479 * (init_cfs_rq) and having one entity represent this group of
8480 * tasks in rq->cfs (i.e init_task_group->se[] != NULL).
8481 */
8482 init_tg_cfs_entry(&init_task_group,
8483 &per_cpu(init_cfs_rq, i),
8484 &per_cpu(init_sched_entity, i), i, 1,
8485 root_task_group.se[i]);
8486
8487 #endif
8488 #endif /* CONFIG_FAIR_GROUP_SCHED */
8489
8490 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
8491 #ifdef CONFIG_RT_GROUP_SCHED
8492 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
8493 #ifdef CONFIG_CGROUP_SCHED
8494 init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
8495 #elif defined CONFIG_USER_SCHED
8496 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
8497 init_tg_rt_entry(&init_task_group,
8498 &per_cpu(init_rt_rq, i),
8499 &per_cpu(init_sched_rt_entity, i), i, 1,
8500 root_task_group.rt_se[i]);
8501 #endif
8502 #endif
8503
8504 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
8505 rq->cpu_load[j] = 0;
8506 #ifdef CONFIG_SMP
8507 rq->sd = NULL;
8508 rq->rd = NULL;
8509 rq->active_balance = 0;
8510 rq->next_balance = jiffies;
8511 rq->push_cpu = 0;
8512 rq->cpu = i;
8513 rq->online = 0;
8514 rq->migration_thread = NULL;
8515 INIT_LIST_HEAD(&rq->migration_queue);
8516 rq_attach_root(rq, &def_root_domain);
8517 #endif
8518 init_rq_hrtick(rq);
8519 atomic_set(&rq->nr_iowait, 0);
8520 }
8521
8522 set_load_weight(&init_task);
8523
8524 #ifdef CONFIG_PREEMPT_NOTIFIERS
8525 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
8526 #endif
8527
8528 #ifdef CONFIG_SMP
8529 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
8530 #endif
8531
8532 #ifdef CONFIG_RT_MUTEXES
8533 plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
8534 #endif
8535
8536 /*
8537 * The boot idle thread does lazy MMU switching as well:
8538 */
8539 atomic_inc(&init_mm.mm_count);
8540 enter_lazy_tlb(&init_mm, current);
8541
8542 /*
8543 * Make us the idle thread. Technically, schedule() should not be
8544  * called from this thread; however, somewhere below it might be,
8545 * but because we are the idle thread, we just pick up running again
8546 * when this runqueue becomes "idle".
8547 */
8548 init_idle(current, smp_processor_id());
8549 /*
8550 * During early bootup we pretend to be a normal task:
8551 */
8552 current->sched_class = &fair_sched_class;
8553
8554 /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
8555 alloc_bootmem_cpumask_var(&nohz_cpu_mask);
8556 #ifdef CONFIG_SMP
8557 #ifdef CONFIG_NO_HZ
8558 alloc_bootmem_cpumask_var(&nohz.cpu_mask);
8559 #endif
8560 alloc_bootmem_cpumask_var(&cpu_isolated_map);
8561 #endif /* SMP */
8562
8563 scheduler_running = 1;
8564 }
8565
8566 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
8567 static int __might_sleep_init_called;
8568 int __init __might_sleep_init(void)
8569 {
8570 __might_sleep_init_called = 1;
8571 return 0;
8572 }
8573 early_initcall(__might_sleep_init);
8574
8575 void __might_sleep(char *file, int line)
8576 {
8577 #ifdef in_atomic
8578 static unsigned long prev_jiffy; /* ratelimiting */
8579
8580 if ((!in_atomic() && !irqs_disabled()) || oops_in_progress)
8581 return;
8582 if (system_state != SYSTEM_RUNNING &&
8583 (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
8584 return;
8585 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8586 return;
8587 prev_jiffy = jiffies;
8588
8589 printk(KERN_ERR
8590 "BUG: sleeping function called from invalid context at %s:%d\n",
8591 file, line);
8592 printk(KERN_ERR
8593 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8594 in_atomic(), irqs_disabled(),
8595 current->pid, current->comm);
8596
8597 debug_show_held_locks(current);
8598 if (irqs_disabled())
8599 print_irqtrace_events(current);
8600 dump_stack();
8601 #endif
8602 }
8603 EXPORT_SYMBOL(__might_sleep);
8604 #endif
8605
8606 #ifdef CONFIG_MAGIC_SYSRQ
8607 static void normalize_task(struct rq *rq, struct task_struct *p)
8608 {
8609 int on_rq;
8610
8611 update_rq_clock(rq);
8612 on_rq = p->se.on_rq;
8613 if (on_rq)
8614 deactivate_task(rq, p, 0);
8615 __setscheduler(rq, p, SCHED_NORMAL, 0);
8616 if (on_rq) {
8617 activate_task(rq, p, 0);
8618 resched_task(rq->curr);
8619 }
8620 }
8621
8622 void normalize_rt_tasks(void)
8623 {
8624 struct task_struct *g, *p;
8625 unsigned long flags;
8626 struct rq *rq;
8627
8628 read_lock_irqsave(&tasklist_lock, flags);
8629 do_each_thread(g, p) {
8630 /*
8631 * Only normalize user tasks:
8632 */
8633 if (!p->mm)
8634 continue;
8635
8636 p->se.exec_start = 0;
8637 #ifdef CONFIG_SCHEDSTATS
8638 p->se.wait_start = 0;
8639 p->se.sleep_start = 0;
8640 p->se.block_start = 0;
8641 #endif
8642
8643 if (!rt_task(p)) {
8644 /*
8645 * Renice negative nice level userspace
8646 * tasks back to 0:
8647 */
8648 if (TASK_NICE(p) < 0 && p->mm)
8649 set_user_nice(p, 0);
8650 continue;
8651 }
8652
8653 spin_lock(&p->pi_lock);
8654 rq = __task_rq_lock(p);
8655
8656 normalize_task(rq, p);
8657
8658 __task_rq_unlock(rq);
8659 spin_unlock(&p->pi_lock);
8660 } while_each_thread(g, p);
8661
8662 read_unlock_irqrestore(&tasklist_lock, flags);
8663 }
8664
8665 #endif /* CONFIG_MAGIC_SYSRQ */
8666
8667 #ifdef CONFIG_IA64
8668 /*
8669 * These functions are only useful for the IA64 MCA handling.
8670 *
8671 * They can only be called when the whole system has been
8672 * stopped - every CPU needs to be quiescent, and no scheduling
8673 * activity can take place. Using them for anything else would
8674 * be a serious bug, and as a result, they aren't even visible
8675 * under any other configuration.
8676 */
8677
8678 /**
8679 * curr_task - return the current task for a given cpu.
8680 * @cpu: the processor in question.
8681 *
8682 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8683 */
8684 struct task_struct *curr_task(int cpu)
8685 {
8686 return cpu_curr(cpu);
8687 }
8688
8689 /**
8690 * set_curr_task - set the current task for a given cpu.
8691 * @cpu: the processor in question.
8692 * @p: the task pointer to set.
8693 *
8694 * Description: This function must only be used when non-maskable interrupts
8695 * are serviced on a separate stack. It allows the architecture to switch the
8696 * notion of the current task on a cpu in a non-blocking manner. This function
8697  * must be called with all CPUs synchronized and interrupts disabled; the
8698  * caller must save the original value of the current task (see
8699 * curr_task() above) and restore that value before reenabling interrupts and
8700 * re-starting the system.
8701 *
8702 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8703 */
8704 void set_curr_task(int cpu, struct task_struct *p)
8705 {
8706 cpu_curr(cpu) = p;
8707 }
8708
8709 #endif
8710
8711 #ifdef CONFIG_FAIR_GROUP_SCHED
8712 static void free_fair_sched_group(struct task_group *tg)
8713 {
8714 int i;
8715
8716 for_each_possible_cpu(i) {
8717 if (tg->cfs_rq)
8718 kfree(tg->cfs_rq[i]);
8719 if (tg->se)
8720 kfree(tg->se[i]);
8721 }
8722
8723 kfree(tg->cfs_rq);
8724 kfree(tg->se);
8725 }
8726
8727 static
8728 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8729 {
8730 struct cfs_rq *cfs_rq;
8731 struct sched_entity *se;
8732 struct rq *rq;
8733 int i;
8734
8735 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
8736 if (!tg->cfs_rq)
8737 goto err;
8738 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
8739 if (!tg->se)
8740 goto err;
8741
8742 tg->shares = NICE_0_LOAD;
8743
8744 for_each_possible_cpu(i) {
8745 rq = cpu_rq(i);
8746
8747 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8748 GFP_KERNEL, cpu_to_node(i));
8749 if (!cfs_rq)
8750 goto err;
8751
8752 se = kzalloc_node(sizeof(struct sched_entity),
8753 GFP_KERNEL, cpu_to_node(i));
8754 if (!se)
8755 goto err;
8756
8757 init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
8758 }
8759
8760 return 1;
8761
8762 err:
8763 return 0;
8764 }
8765
8766 static inline void register_fair_sched_group(struct task_group *tg, int cpu)
8767 {
8768 list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list,
8769 &cpu_rq(cpu)->leaf_cfs_rq_list);
8770 }
8771
8772 static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8773 {
8774 list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
8775 }
8776 #else /* !CONFIG_FAIR_GROUP_SCHED */
8777 static inline void free_fair_sched_group(struct task_group *tg)
8778 {
8779 }
8780
8781 static inline
8782 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8783 {
8784 return 1;
8785 }
8786
8787 static inline void register_fair_sched_group(struct task_group *tg, int cpu)
8788 {
8789 }
8790
8791 static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8792 {
8793 }
8794 #endif /* CONFIG_FAIR_GROUP_SCHED */
8795
8796 #ifdef CONFIG_RT_GROUP_SCHED
8797 static void free_rt_sched_group(struct task_group *tg)
8798 {
8799 int i;
8800
8801 destroy_rt_bandwidth(&tg->rt_bandwidth);
8802
8803 for_each_possible_cpu(i) {
8804 if (tg->rt_rq)
8805 kfree(tg->rt_rq[i]);
8806 if (tg->rt_se)
8807 kfree(tg->rt_se[i]);
8808 }
8809
8810 kfree(tg->rt_rq);
8811 kfree(tg->rt_se);
8812 }
8813
8814 static
8815 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
8816 {
8817 struct rt_rq *rt_rq;
8818 struct sched_rt_entity *rt_se;
8819 struct rq *rq;
8820 int i;
8821
8822 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
8823 if (!tg->rt_rq)
8824 goto err;
8825 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
8826 if (!tg->rt_se)
8827 goto err;
8828
8829 init_rt_bandwidth(&tg->rt_bandwidth,
8830 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
8831
8832 for_each_possible_cpu(i) {
8833 rq = cpu_rq(i);
8834
8835 rt_rq = kzalloc_node(sizeof(struct rt_rq),
8836 GFP_KERNEL, cpu_to_node(i));
8837 if (!rt_rq)
8838 goto err;
8839
8840 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
8841 GFP_KERNEL, cpu_to_node(i));
8842 if (!rt_se)
8843 goto err;
8844
8845 init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
8846 }
8847
8848 return 1;
8849
8850 err:
8851 return 0;
8852 }
8853
8854 static inline void register_rt_sched_group(struct task_group *tg, int cpu)
8855 {
8856 list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list,
8857 &cpu_rq(cpu)->leaf_rt_rq_list);
8858 }
8859
8860 static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
8861 {
8862 list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
8863 }
8864 #else /* !CONFIG_RT_GROUP_SCHED */
8865 static inline void free_rt_sched_group(struct task_group *tg)
8866 {
8867 }
8868
8869 static inline
8870 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
8871 {
8872 return 1;
8873 }
8874
8875 static inline void register_rt_sched_group(struct task_group *tg, int cpu)
8876 {
8877 }
8878
8879 static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
8880 {
8881 }
8882 #endif /* CONFIG_RT_GROUP_SCHED */
8883
8884 #ifdef CONFIG_GROUP_SCHED
8885 static void free_sched_group(struct task_group *tg)
8886 {
8887 free_fair_sched_group(tg);
8888 free_rt_sched_group(tg);
8889 kfree(tg);
8890 }
8891
8892 /* allocate runqueue etc for a new task group */
8893 struct task_group *sched_create_group(struct task_group *parent)
8894 {
8895 struct task_group *tg;
8896 unsigned long flags;
8897 int i;
8898
8899 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
8900 if (!tg)
8901 return ERR_PTR(-ENOMEM);
8902
8903 if (!alloc_fair_sched_group(tg, parent))
8904 goto err;
8905
8906 if (!alloc_rt_sched_group(tg, parent))
8907 goto err;
8908
8909 spin_lock_irqsave(&task_group_lock, flags);
8910 for_each_possible_cpu(i) {
8911 register_fair_sched_group(tg, i);
8912 register_rt_sched_group(tg, i);
8913 }
8914 list_add_rcu(&tg->list, &task_groups);
8915
8916 WARN_ON(!parent); /* root should already exist */
8917
8918 tg->parent = parent;
8919 INIT_LIST_HEAD(&tg->children);
8920 list_add_rcu(&tg->siblings, &parent->children);
8921 spin_unlock_irqrestore(&task_group_lock, flags);
8922
8923 return tg;
8924
8925 err:
8926 free_sched_group(tg);
8927 return ERR_PTR(-ENOMEM);
8928 }
8929
8930 /* rcu callback to free various structures associated with a task group */
8931 static void free_sched_group_rcu(struct rcu_head *rhp)
8932 {
8933 /* now it should be safe to free those cfs_rqs */
8934 free_sched_group(container_of(rhp, struct task_group, rcu));
8935 }
8936
8937 /* Destroy runqueue etc associated with a task group */
8938 void sched_destroy_group(struct task_group *tg)
8939 {
8940 unsigned long flags;
8941 int i;
8942
8943 spin_lock_irqsave(&task_group_lock, flags);
8944 for_each_possible_cpu(i) {
8945 unregister_fair_sched_group(tg, i);
8946 unregister_rt_sched_group(tg, i);
8947 }
8948 list_del_rcu(&tg->list);
8949 list_del_rcu(&tg->siblings);
8950 spin_unlock_irqrestore(&task_group_lock, flags);
8951
8952 /* wait for possible concurrent references to cfs_rqs to complete */
8953 call_rcu(&tg->rcu, free_sched_group_rcu);
8954 }
8955
8956 /* Change a task's runqueue when it moves between groups.
8957 * The caller of this function should have put the task in its new group
8958 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
8959 * reflect its new group.
8960 */
8961 void sched_move_task(struct task_struct *tsk)
8962 {
8963 int on_rq, running;
8964 unsigned long flags;
8965 struct rq *rq;
8966
8967 rq = task_rq_lock(tsk, &flags);
8968
8969 update_rq_clock(rq);
8970
8971 running = task_current(rq, tsk);
8972 on_rq = tsk->se.on_rq;
8973
8974 if (on_rq)
8975 dequeue_task(rq, tsk, 0);
8976 if (unlikely(running))
8977 tsk->sched_class->put_prev_task(rq, tsk);
8978
8979 set_task_rq(tsk, task_cpu(tsk));
8980
8981 #ifdef CONFIG_FAIR_GROUP_SCHED
8982 if (tsk->sched_class->moved_group)
8983 tsk->sched_class->moved_group(tsk);
8984 #endif
8985
8986 if (unlikely(running))
8987 tsk->sched_class->set_curr_task(rq);
8988 if (on_rq)
8989 enqueue_task(rq, tsk, 0);
8990
8991 task_rq_unlock(rq, &flags);
8992 }
8993 #endif /* CONFIG_GROUP_SCHED */
8994
8995 #ifdef CONFIG_FAIR_GROUP_SCHED
8996 static void __set_se_shares(struct sched_entity *se, unsigned long shares)
8997 {
8998 struct cfs_rq *cfs_rq = se->cfs_rq;
8999 int on_rq;
9000
9001 on_rq = se->on_rq;
9002 if (on_rq)
9003 dequeue_entity(cfs_rq, se, 0);
9004
9005 se->load.weight = shares;
9006 se->load.inv_weight = 0;
9007
9008 if (on_rq)
9009 enqueue_entity(cfs_rq, se, 0);
9010 }
9011
9012 static void set_se_shares(struct sched_entity *se, unsigned long shares)
9013 {
9014 struct cfs_rq *cfs_rq = se->cfs_rq;
9015 struct rq *rq = cfs_rq->rq;
9016 unsigned long flags;
9017
9018 spin_lock_irqsave(&rq->lock, flags);
9019 __set_se_shares(se, shares);
9020 spin_unlock_irqrestore(&rq->lock, flags);
9021 }
9022
9023 static DEFINE_MUTEX(shares_mutex);
9024
9025 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
9026 {
9027 int i;
9028 unsigned long flags;
9029
9030 /*
9031 * We can't change the weight of the root cgroup.
9032 */
9033 if (!tg->se[0])
9034 return -EINVAL;
9035
9036 if (shares < MIN_SHARES)
9037 shares = MIN_SHARES;
9038 else if (shares > MAX_SHARES)
9039 shares = MAX_SHARES;
9040
9041 mutex_lock(&shares_mutex);
9042 if (tg->shares == shares)
9043 goto done;
9044
9045 spin_lock_irqsave(&task_group_lock, flags);
9046 for_each_possible_cpu(i)
9047 unregister_fair_sched_group(tg, i);
9048 list_del_rcu(&tg->siblings);
9049 spin_unlock_irqrestore(&task_group_lock, flags);
9050
9051 /* wait for any ongoing reference to this group to finish */
9052 synchronize_sched();
9053
9054 /*
9055 * Now we are free to modify the group's share on each cpu
9056 * w/o tripping rebalance_share or load_balance_fair.
9057 */
9058 tg->shares = shares;
9059 for_each_possible_cpu(i) {
9060 /*
9061 * force a rebalance
9062 */
9063 cfs_rq_set_shares(tg->cfs_rq[i], 0);
9064 set_se_shares(tg->se[i], shares);
9065 }
9066
9067 /*
9068 * Enable load balance activity on this group, by inserting it back on
9069 * each cpu's rq->leaf_cfs_rq_list.
9070 */
9071 spin_lock_irqsave(&task_group_lock, flags);
9072 for_each_possible_cpu(i)
9073 register_fair_sched_group(tg, i);
9074 list_add_rcu(&tg->siblings, &tg->parent->children);
9075 spin_unlock_irqrestore(&task_group_lock, flags);
9076 done:
9077 mutex_unlock(&shares_mutex);
9078 return 0;
9079 }
9080
9081 unsigned long sched_group_shares(struct task_group *tg)
9082 {
9083 return tg->shares;
9084 }
9085 #endif
9086
9087 #ifdef CONFIG_RT_GROUP_SCHED
9088 /*
9089 * Ensure that the real time constraints are schedulable.
9090 */
9091 static DEFINE_MUTEX(rt_constraints_mutex);
9092
9093 static unsigned long to_ratio(u64 period, u64 runtime)
9094 {
9095 if (runtime == RUNTIME_INF)
9096 return 1ULL << 20;
9097
9098 return div64_u64(runtime << 20, period);
9099 }
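/*
 * Worked example (illustrative): with a 1s period and 950ms of runtime,
 *
 *	to_ratio(1000000000, 950000000) = (950000000 << 20) / 1000000000
 *	                               ~= 0.95 * 2^20 = 996147
 *
 * while RUNTIME_INF maps to the full scale, 1 << 20 = 1048576.
 */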
9100
9101 /* Must be called with tasklist_lock held */
9102 static inline int tg_has_rt_tasks(struct task_group *tg)
9103 {
9104 struct task_struct *g, *p;
9105
9106 do_each_thread(g, p) {
9107 if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
9108 return 1;
9109 } while_each_thread(g, p);
9110
9111 return 0;
9112 }
9113
9114 struct rt_schedulable_data {
9115 struct task_group *tg;
9116 u64 rt_period;
9117 u64 rt_runtime;
9118 };
9119
9120 static int tg_schedulable(struct task_group *tg, void *data)
9121 {
9122 struct rt_schedulable_data *d = data;
9123 struct task_group *child;
9124 unsigned long total, sum = 0;
9125 u64 period, runtime;
9126
9127 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
9128 runtime = tg->rt_bandwidth.rt_runtime;
9129
9130 if (tg == d->tg) {
9131 period = d->rt_period;
9132 runtime = d->rt_runtime;
9133 }
9134
9135 #ifdef CONFIG_USER_SCHED
9136 if (tg == &root_task_group) {
9137 period = global_rt_period();
9138 runtime = global_rt_runtime();
9139 }
9140 #endif
9141
9142 /*
9143 * Cannot have more runtime than the period.
9144 */
9145 if (runtime > period && runtime != RUNTIME_INF)
9146 return -EINVAL;
9147
9148 /*
9149 * Ensure we don't starve existing RT tasks.
9150 */
9151 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
9152 return -EBUSY;
9153
9154 total = to_ratio(period, runtime);
9155
9156 /*
9157 * Nobody can have more than the global setting allows.
9158 */
9159 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
9160 return -EINVAL;
9161
9162 /*
9163 * The sum of our children's runtime should not exceed our own.
9164 */
9165 list_for_each_entry_rcu(child, &tg->children, siblings) {
9166 period = ktime_to_ns(child->rt_bandwidth.rt_period);
9167 runtime = child->rt_bandwidth.rt_runtime;
9168
9169 if (child == d->tg) {
9170 period = d->rt_period;
9171 runtime = d->rt_runtime;
9172 }
9173
9174 sum += to_ratio(period, runtime);
9175 }
9176
9177 if (sum > total)
9178 return -EINVAL;
9179
9180 return 0;
9181 }
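/*
 * Example of the check above (illustrative): a group allotted 500ms of
 * runtime per 1s period has a ratio of ~0.5 * 2^20.  Its children's
 * ratios must sum to no more than that, and no group's ratio may exceed
 * the global sysctl_sched_rt_runtime / sysctl_sched_rt_period ratio
 * (0.95 * 2^20 with the usual 950000us / 1000000us defaults).
 */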
9182
9183 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
9184 {
9185 struct rt_schedulable_data data = {
9186 .tg = tg,
9187 .rt_period = period,
9188 .rt_runtime = runtime,
9189 };
9190
9191 return walk_tg_tree(tg_schedulable, tg_nop, &data);
9192 }
9193
9194 static int tg_set_bandwidth(struct task_group *tg,
9195 u64 rt_period, u64 rt_runtime)
9196 {
9197 int i, err = 0;
9198
9199 mutex_lock(&rt_constraints_mutex);
9200 read_lock(&tasklist_lock);
9201 err = __rt_schedulable(tg, rt_period, rt_runtime);
9202 if (err)
9203 goto unlock;
9204
9205 spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
9206 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
9207 tg->rt_bandwidth.rt_runtime = rt_runtime;
9208
9209 for_each_possible_cpu(i) {
9210 struct rt_rq *rt_rq = tg->rt_rq[i];
9211
9212 spin_lock(&rt_rq->rt_runtime_lock);
9213 rt_rq->rt_runtime = rt_runtime;
9214 spin_unlock(&rt_rq->rt_runtime_lock);
9215 }
9216 spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
9217 unlock:
9218 read_unlock(&tasklist_lock);
9219 mutex_unlock(&rt_constraints_mutex);
9220
9221 return err;
9222 }
9223
9224 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
9225 {
9226 u64 rt_runtime, rt_period;
9227
9228 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
9229 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
9230 if (rt_runtime_us < 0)
9231 rt_runtime = RUNTIME_INF;
9232
9233 return tg_set_bandwidth(tg, rt_period, rt_runtime);
9234 }
9235
9236 long sched_group_rt_runtime(struct task_group *tg)
9237 {
9238 u64 rt_runtime_us;
9239
9240 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
9241 return -1;
9242
9243 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
9244 do_div(rt_runtime_us, NSEC_PER_USEC);
9245 return rt_runtime_us;
9246 }
9247
9248 int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
9249 {
9250 u64 rt_runtime, rt_period;
9251
9252 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
9253 rt_runtime = tg->rt_bandwidth.rt_runtime;
9254
9255 if (rt_period == 0)
9256 return -EINVAL;
9257
9258 return tg_set_bandwidth(tg, rt_period, rt_runtime);
9259 }
9260
9261 long sched_group_rt_period(struct task_group *tg)
9262 {
9263 u64 rt_period_us;
9264
9265 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
9266 do_div(rt_period_us, NSEC_PER_USEC);
9267 return rt_period_us;
9268 }
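/*
 * These knobs are exposed through the cpu cgroup controller (see the
 * read/write handlers further down); e.g. (illustrative paths):
 *
 *	# echo 500000  > /cgroup/rt_group/cpu.rt_runtime_us
 *	# echo 1000000 > /cgroup/rt_group/cpu.rt_period_us
 *
 * limits the group to 500ms of RT runtime per 1s period, subject to the
 * __rt_schedulable() checks above.
 */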
9269
9270 static int sched_rt_global_constraints(void)
9271 {
9272 u64 runtime, period;
9273 int ret = 0;
9274
9275 if (sysctl_sched_rt_period <= 0)
9276 return -EINVAL;
9277
9278 runtime = global_rt_runtime();
9279 period = global_rt_period();
9280
9281 /*
9282 * Sanity check on the sysctl variables.
9283 */
9284 if (runtime > period && runtime != RUNTIME_INF)
9285 return -EINVAL;
9286
9287 mutex_lock(&rt_constraints_mutex);
9288 read_lock(&tasklist_lock);
9289 ret = __rt_schedulable(NULL, 0, 0);
9290 read_unlock(&tasklist_lock);
9291 mutex_unlock(&rt_constraints_mutex);
9292
9293 return ret;
9294 }
9295
9296 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
9297 {
9298 /* Don't accept realtime tasks when there is no way for them to run */
9299 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
9300 return 0;
9301
9302 return 1;
9303 }
9304
9305 #else /* !CONFIG_RT_GROUP_SCHED */
9306 static int sched_rt_global_constraints(void)
9307 {
9308 unsigned long flags;
9309 int i;
9310
9311 if (sysctl_sched_rt_period <= 0)
9312 return -EINVAL;
9313
9314 spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
9315 for_each_possible_cpu(i) {
9316 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
9317
9318 spin_lock(&rt_rq->rt_runtime_lock);
9319 rt_rq->rt_runtime = global_rt_runtime();
9320 spin_unlock(&rt_rq->rt_runtime_lock);
9321 }
9322 spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
9323
9324 return 0;
9325 }
9326 #endif /* CONFIG_RT_GROUP_SCHED */
9327
9328 int sched_rt_handler(struct ctl_table *table, int write,
9329 struct file *filp, void __user *buffer, size_t *lenp,
9330 loff_t *ppos)
9331 {
9332 int ret;
9333 int old_period, old_runtime;
9334 static DEFINE_MUTEX(mutex);
9335
9336 mutex_lock(&mutex);
9337 old_period = sysctl_sched_rt_period;
9338 old_runtime = sysctl_sched_rt_runtime;
9339
9340 ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
9341
9342 if (!ret && write) {
9343 ret = sched_rt_global_constraints();
9344 if (ret) {
9345 sysctl_sched_rt_period = old_period;
9346 sysctl_sched_rt_runtime = old_runtime;
9347 } else {
9348 def_rt_bandwidth.rt_runtime = global_rt_runtime();
9349 def_rt_bandwidth.rt_period =
9350 ns_to_ktime(global_rt_period());
9351 }
9352 }
9353 mutex_unlock(&mutex);
9354
9355 return ret;
9356 }
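/*
 * Example usage (a sketch, assuming the standard sysctl names): the two
 * values updated above are normally exposed as
 *
 *	/proc/sys/kernel/sched_rt_period_us
 *	/proc/sys/kernel/sched_rt_runtime_us
 *
 * so an administrator can, e.g.:
 *
 *	# echo 900000 > /proc/sys/kernel/sched_rt_runtime_us
 *
 * If the new values fail sched_rt_global_constraints(), the write is
 * rolled back to the previous period/runtime pair.
 */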
9357
9358 #ifdef CONFIG_CGROUP_SCHED
9359
9360 /* return corresponding task_group object of a cgroup */
9361 static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
9362 {
9363 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
9364 struct task_group, css);
9365 }
9366
9367 static struct cgroup_subsys_state *
9368 cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
9369 {
9370 struct task_group *tg, *parent;
9371
9372 if (!cgrp->parent) {
9373 /* This is early initialization for the top cgroup */
9374 return &init_task_group.css;
9375 }
9376
9377 parent = cgroup_tg(cgrp->parent);
9378 tg = sched_create_group(parent);
9379 if (IS_ERR(tg))
9380 return ERR_PTR(-ENOMEM);
9381
9382 return &tg->css;
9383 }
9384
9385 static void
9386 cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
9387 {
9388 struct task_group *tg = cgroup_tg(cgrp);
9389
9390 sched_destroy_group(tg);
9391 }
9392
9393 static int
9394 cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
9395 struct task_struct *tsk)
9396 {
9397 if ((current != tsk) && (!capable(CAP_SYS_NICE))) {
9398 const struct cred *cred = current_cred(), *tcred;
9399
9400 tcred = __task_cred(tsk);
9401
9402 if (cred->euid != tcred->uid && cred->euid != tcred->suid)
9403 return -EPERM;
9404 }
9405
9406 #ifdef CONFIG_RT_GROUP_SCHED
9407 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
9408 return -EINVAL;
9409 #else
9410 /* We don't support RT-tasks being in separate groups */
9411 if (tsk->sched_class != &fair_sched_class)
9412 return -EINVAL;
9413 #endif
9414
9415 return 0;
9416 }
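/*
 * Example (illustrative): an unprivileged user moving somebody else's
 * task into a cpu cgroup needs CAP_SYS_NICE, or an effective uid that
 * matches the target's real or saved uid; otherwise the attach fails
 * with -EPERM before any scheduling-class restrictions are checked.
 */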
9417
9418 static void
9419 cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
9420 struct cgroup *old_cont, struct task_struct *tsk)
9421 {
9422 sched_move_task(tsk);
9423 }
9424
9425 #ifdef CONFIG_FAIR_GROUP_SCHED
9426 static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
9427 u64 shareval)
9428 {
9429 return sched_group_set_shares(cgroup_tg(cgrp), shareval);
9430 }
9431
9432 static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
9433 {
9434 struct task_group *tg = cgroup_tg(cgrp);
9435
9436 return (u64) tg->shares;
9437 }
9438 #endif /* CONFIG_FAIR_GROUP_SCHED */
9439
9440 #ifdef CONFIG_RT_GROUP_SCHED
9441 static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
9442 s64 val)
9443 {
9444 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
9445 }
9446
9447 static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
9448 {
9449 return sched_group_rt_runtime(cgroup_tg(cgrp));
9450 }
9451
9452 static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
9453 u64 rt_period_us)
9454 {
9455 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
9456 }
9457
9458 static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
9459 {
9460 return sched_group_rt_period(cgroup_tg(cgrp));
9461 }
9462 #endif /* CONFIG_RT_GROUP_SCHED */
9463
9464 static struct cftype cpu_files[] = {
9465 #ifdef CONFIG_FAIR_GROUP_SCHED
9466 {
9467 .name = "shares",
9468 .read_u64 = cpu_shares_read_u64,
9469 .write_u64 = cpu_shares_write_u64,
9470 },
9471 #endif
9472 #ifdef CONFIG_RT_GROUP_SCHED
9473 {
9474 .name = "rt_runtime_us",
9475 .read_s64 = cpu_rt_runtime_read,
9476 .write_s64 = cpu_rt_runtime_write,
9477 },
9478 {
9479 .name = "rt_period_us",
9480 .read_u64 = cpu_rt_period_read_uint,
9481 .write_u64 = cpu_rt_period_write_uint,
9482 },
9483 #endif
9484 };
9485
9486 static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
9487 {
9488 return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
9489 }
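/*
 * Example usage (a sketch, not part of the original file): when the cpu
 * controller is mounted, the cftype entries above show up with the
 * subsystem name as a prefix, e.g.
 *
 *	# mount -t cgroup -o cpu none /cgroup
 *	# mkdir /cgroup/mygroup
 *	# echo 512 > /cgroup/mygroup/cpu.shares
 *	# cat /cgroup/mygroup/cpu.rt_runtime_us
 *
 * Reads and writes of these files are routed to the read/write handlers
 * registered in cpu_files[] above.  The /cgroup mount point is only
 * illustrative.
 */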
9490
9491 struct cgroup_subsys cpu_cgroup_subsys = {
9492 .name = "cpu",
9493 .create = cpu_cgroup_create,
9494 .destroy = cpu_cgroup_destroy,
9495 .can_attach = cpu_cgroup_can_attach,
9496 .attach = cpu_cgroup_attach,
9497 .populate = cpu_cgroup_populate,
9498 .subsys_id = cpu_cgroup_subsys_id,
9499 .early_init = 1,
9500 };
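/*
 * Note (editorial): .early_init = 1 tells the cgroup core to initialize
 * this subsystem from cgroup_init_early(), before the rest of the boot
 * sequence, since the scheduler relies on init_task_group (returned by
 * cpu_cgroup_create() for the root cgroup) very early on.
 */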
9501
9502 #endif /* CONFIG_CGROUP_SCHED */
9503
9504 #ifdef CONFIG_CGROUP_CPUACCT
9505
9506 /*
9507 * CPU accounting code for task groups.
9508 *
9509 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
9510 * (balbir@in.ibm.com).
9511 */
9512
9513 /* track cpu usage of a group of tasks and its child groups */
9514 struct cpuacct {
9515 struct cgroup_subsys_state css;
9516 /* cpuusage is a per-cpu u64 counter of CPU time consumed, in nanoseconds */
9517 u64 *cpuusage;
9518 struct cpuacct *parent;
9519 };
9520
9521 struct cgroup_subsys cpuacct_subsys;
9522
9523 /* return cpu accounting group corresponding to this container */
9524 static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
9525 {
9526 return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
9527 struct cpuacct, css);
9528 }
9529
9530 /* return cpu accounting group to which this task belongs */
9531 static inline struct cpuacct *task_ca(struct task_struct *tsk)
9532 {
9533 return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
9534 struct cpuacct, css);
9535 }
9536
9537 /* create a new cpu accounting group */
9538 static struct cgroup_subsys_state *cpuacct_create(
9539 struct cgroup_subsys *ss, struct cgroup *cgrp)
9540 {
9541 struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
9542
9543 if (!ca)
9544 return ERR_PTR(-ENOMEM);
9545
9546 ca->cpuusage = alloc_percpu(u64);
9547 if (!ca->cpuusage) {
9548 kfree(ca);
9549 return ERR_PTR(-ENOMEM);
9550 }
9551
9552 if (cgrp->parent)
9553 ca->parent = cgroup_ca(cgrp->parent);
9554
9555 return &ca->css;
9556 }
9557
9558 /* destroy an existing cpu accounting group */
9559 static void
9560 cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
9561 {
9562 struct cpuacct *ca = cgroup_ca(cgrp);
9563
9564 free_percpu(ca->cpuusage);
9565 kfree(ca);
9566 }
9567
9568 static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
9569 {
9570 u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
9571 u64 data;
9572
9573 #ifndef CONFIG_64BIT
9574 /*
9575 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
9576 */
9577 spin_lock_irq(&cpu_rq(cpu)->lock);
9578 data = *cpuusage;
9579 spin_unlock_irq(&cpu_rq(cpu)->lock);
9580 #else
9581 data = *cpuusage;
9582 #endif
9583
9584 return data;
9585 }
9586
9587 static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
9588 {
9589 u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
9590
9591 #ifndef CONFIG_64BIT
9592 /*
9593 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
9594 */
9595 spin_lock_irq(&cpu_rq(cpu)->lock);
9596 *cpuusage = val;
9597 spin_unlock_irq(&cpu_rq(cpu)->lock);
9598 #else
9599 *cpuusage = val;
9600 #endif
9601 }
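/*
 * Editorial note on the locking above: on 32-bit platforms a u64 load or
 * store is not a single atomic operation, so without rq->lock a reader
 * could observe a torn, half-updated counter while cpuacct_charge()
 * (which runs with rq->lock held) is adding to it.  On 64-bit platforms
 * the plain access is already atomic enough for this purpose.
 */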
9602
9603 /* return total cpu usage (in nanoseconds) of a group */
9604 static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
9605 {
9606 struct cpuacct *ca = cgroup_ca(cgrp);
9607 u64 totalcpuusage = 0;
9608 int i;
9609
9610 for_each_present_cpu(i)
9611 totalcpuusage += cpuacct_cpuusage_read(ca, i);
9612
9613 return totalcpuusage;
9614 }
9615
9616 static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
9617 u64 reset)
9618 {
9619 struct cpuacct *ca = cgroup_ca(cgrp);
9620 int err = 0;
9621 int i;
9622
9623 if (reset) {
9624 err = -EINVAL;
9625 goto out;
9626 }
9627
9628 for_each_present_cpu(i)
9629 cpuacct_cpuusage_write(ca, i, 0);
9630
9631 out:
9632 return err;
9633 }
9634
9635 static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
9636 struct seq_file *m)
9637 {
9638 struct cpuacct *ca = cgroup_ca(cgroup);
9639 u64 percpu;
9640 int i;
9641
9642 for_each_present_cpu(i) {
9643 percpu = cpuacct_cpuusage_read(ca, i);
9644 seq_printf(m, "%llu ", (unsigned long long) percpu);
9645 }
9646 seq_printf(m, "\n");
9647 return 0;
9648 }
9649
9650 static struct cftype files[] = {
9651 {
9652 .name = "usage",
9653 .read_u64 = cpuusage_read,
9654 .write_u64 = cpuusage_write,
9655 },
9656 {
9657 .name = "usage_percpu",
9658 .read_seq_string = cpuacct_percpu_seq_read,
9659 },
9660
9661 };
9662
9663 static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
9664 {
9665 return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
9666 }
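/*
 * Example usage (a sketch): with the cpuacct controller mounted, the
 * files above appear as cpuacct.usage and cpuacct.usage_percpu, e.g.
 *
 *	# cat /cgroup/mygroup/cpuacct.usage		(total CPU time, in ns)
 *	# cat /cgroup/mygroup/cpuacct.usage_percpu	(per-cpu values, space separated)
 *	# echo 0 > /cgroup/mygroup/cpuacct.usage	(reset; only 0 is accepted)
 *
 * The /cgroup/mygroup path is illustrative only.
 */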
9667
9668 /*
9669  * Charge this task's execution time to its accounting group and to
9670  * each of that group's ancestors (the ca->parent chain).
9671  * Called with rq->lock held.
9672  */
9673 static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
9674 {
9675 struct cpuacct *ca;
9676 int cpu;
9677
9678 if (!cpuacct_subsys.active)
9679 return;
9680
9681 cpu = task_cpu(tsk);
9682 ca = task_ca(tsk);
9683
9684 for (; ca; ca = ca->parent) {
9685 u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
9686 *cpuusage += cputime;
9687 }
9688 }
9689
9690 struct cgroup_subsys cpuacct_subsys = {
9691 .name = "cpuacct",
9692 .create = cpuacct_create,
9693 .destroy = cpuacct_destroy,
9694 .populate = cpuacct_populate,
9695 .subsys_id = cpuacct_subsys_id,
9696 };
9697 #endif /* CONFIG_CGROUP_CPUACCT */
9698