// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/core.c
 *
 *  Core kernel scheduler code and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 */
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
#undef CREATE_TRACE_POINTS

#include "sched.h"

#include <linux/nospec.h>

#include <linux/kcov.h>
#include <linux/scs.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>

#include "../workqueue_internal.h"
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"

#include "pelt.h"
#include "smp.h"

#include <trace/hooks/sched.h>
#include <trace/hooks/dtask.h>
#include <trace/hooks/cgroup.h>

/*
 * Export tracepoints that act as a bare tracehook (i.e. have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_thermal_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_switch);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_waking);
#ifdef CONFIG_SCHEDSTATS
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_sleep);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_wait);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_iowait);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_blocked);
#endif

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
EXPORT_SYMBOL_GPL(runqueues);

#ifdef CONFIG_SCHED_DEBUG
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constant propagation
 * at compile time and compiler optimizations based on the default feature
 * values.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
EXPORT_SYMBOL_GPL(sysctl_sched_features);
#undef SCHED_FEAT
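
/*
 * Illustrative expansion (a sketch, not extra code): if features.h
 * contained only
 *
 *	SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
 *	SCHED_FEAT(START_DEBIT, true)
 *
 * the construct above would expand to
 *
 *	const_debug unsigned int sysctl_sched_features =
 *		(1UL << __SCHED_FEAT_GENTLE_FAIR_SLEEPERS) * true |
 *		(1UL << __SCHED_FEAT_START_DEBIT) * true |
 *		0;
 *
 * i.e. a bitmask with one bit per feature, initialized to each feature's
 * default 'enabled' value.
 */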

/*
 * Print a warning if need_resched is set for the given duration (if
 * LATENCY_WARN is enabled).
 *
 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
 * per boot.
 */
__read_mostly int sysctl_resched_latency_warn_ms = 100;
__read_mostly int sysctl_resched_latency_warn_once = 1;
#endif /* CONFIG_SCHED_DEBUG */

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we measure -rt task CPU usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

#ifdef CONFIG_SCHED_CORE

DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

/* kernel prio, less is more */
static inline int __task_prio(struct task_struct *p)
{
	if (p->sched_class == &stop_sched_class) /* trumps deadline */
		return -2;

	if (rt_prio(p->prio)) /* includes deadline */
		return p->prio; /* [-1, 99] */

	if (p->sched_class == &idle_sched_class)
		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */

	return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */
}
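
/*
 * Summarizing the mapping above (lower value == higher priority):
 *
 *	stop		-> -2
 *	deadline	-> -1
 *	rt		-> [0, 99]
 *	fair		-> 120
 *	idle		-> 140
 */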

/*
 * l(a,b)
 * le(a,b) := !l(b,a)
 * g(a,b)  := l(b,a)
 * ge(a,b) := !l(a,b)
 */

/* real prio, less is less */
static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
{
	int pa = __task_prio(a), pb = __task_prio(b);

	if (-pa < -pb)
		return true;

	if (-pb < -pa)
		return false;

	if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
		return !dl_time_before(a->dl.deadline, b->dl.deadline);

	if (pa == MAX_RT_PRIO + MAX_NICE)	/* fair */
		return cfs_prio_less(a, b, in_fi);

	return false;
}

static inline bool __sched_core_less(struct task_struct *a, struct task_struct *b)
{
	if (a->core_cookie < b->core_cookie)
		return true;

	if (a->core_cookie > b->core_cookie)
		return false;

	/* flip prio, so high prio is leftmost */
	if (prio_less(b, a, task_rq(a)->core->core_forceidle))
		return true;

	return false;
}

#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)

static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
{
	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
}

static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
{
	const struct task_struct *p = __node_2_sc(node);
	unsigned long cookie = (unsigned long)key;

	if (cookie < p->core_cookie)
		return -1;

	if (cookie > p->core_cookie)
		return 1;

	return 0;
}

void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->core->core_task_seq++;

	if (!p->core_cookie)
		return;

	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
}

void sched_core_dequeue(struct rq *rq, struct task_struct *p)
{
	rq->core->core_task_seq++;

	if (!sched_core_enqueued(p))
		return;

	rb_erase(&p->core_node, &rq->core_tree);
	RB_CLEAR_NODE(&p->core_node);
}

/*
 * Find left-most (aka, highest priority) task matching @cookie.
 */
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
{
	struct rb_node *node;

	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
	/*
	 * The idle task always matches any cookie!
	 */
	if (!node)
		return idle_sched_class.pick_task(rq);

	return __node_2_sc(node);
}

static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
{
	struct rb_node *node = &p->core_node;

	node = rb_next(node);
	if (!node)
		return NULL;

	p = container_of(node, struct task_struct, core_node);
	if (p->core_cookie != cookie)
		return NULL;

	return p;
}
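
/*
 * Usage sketch (illustrative): callers walk all enqueued tasks of one
 * cookie in priority order by combining the two helpers above:
 *
 *	for (p = sched_core_find(rq, cookie); p;
 *	     p = sched_core_next(p, cookie)) {
 *		...
 *	}
 *
 * keeping in mind that sched_core_find() may fall back to the idle task,
 * which is not in the core_tree.
 */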

/*
 * Magic required such that:
 *
 *	raw_spin_rq_lock(rq);
 *	...
 *	raw_spin_rq_unlock(rq);
 *
 * ends up locking and unlocking the _same_ lock, and all CPUs
 * always agree on what rq has what lock.
 *
 * XXX entirely possible to selectively enable cores, don't bother for now.
 */

static DEFINE_MUTEX(sched_core_mutex);
static atomic_t sched_core_count;
static struct cpumask sched_core_mask;

static void sched_core_lock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t, i = 0;

	local_irq_save(*flags);
	for_each_cpu(t, smt_mask)
		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
}

static void sched_core_unlock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t;

	for_each_cpu(t, smt_mask)
		raw_spin_unlock(&cpu_rq(t)->__lock);
	local_irq_restore(*flags);
}

static void __sched_core_flip(bool enabled)
{
	unsigned long flags;
	int cpu, t;

	cpus_read_lock();

	/*
	 * Toggle the online cores, one by one.
	 */
	cpumask_copy(&sched_core_mask, cpu_online_mask);
	for_each_cpu(cpu, &sched_core_mask) {
		const struct cpumask *smt_mask = cpu_smt_mask(cpu);

		sched_core_lock(cpu, &flags);

		for_each_cpu(t, smt_mask)
			cpu_rq(t)->core_enabled = enabled;

		sched_core_unlock(cpu, &flags);

		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
	}

	/*
	 * Toggle the offline CPUs.
	 */
	cpumask_copy(&sched_core_mask, cpu_possible_mask);
	cpumask_andnot(&sched_core_mask, &sched_core_mask, cpu_online_mask);

	for_each_cpu(cpu, &sched_core_mask)
		cpu_rq(cpu)->core_enabled = enabled;

	cpus_read_unlock();
}

static void sched_core_assert_empty(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
}

static void __sched_core_enable(void)
{
	static_branch_enable(&__sched_core_enabled);
	/*
	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
	 * and future ones will observe !sched_core_disabled().
	 */
	synchronize_rcu();
	__sched_core_flip(true);
	sched_core_assert_empty();
}

static void __sched_core_disable(void)
{
	sched_core_assert_empty();
	__sched_core_flip(false);
	static_branch_disable(&__sched_core_enabled);
}

void sched_core_get(void)
{
	if (atomic_inc_not_zero(&sched_core_count))
		return;

	mutex_lock(&sched_core_mutex);
	if (!atomic_read(&sched_core_count))
		__sched_core_enable();

	smp_mb__before_atomic();
	atomic_inc(&sched_core_count);
	mutex_unlock(&sched_core_mutex);
}

static void __sched_core_put(struct work_struct *work)
{
	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
		__sched_core_disable();
		mutex_unlock(&sched_core_mutex);
	}
}

void sched_core_put(void)
{
	static DECLARE_WORK(_work, __sched_core_put);

	/*
	 * "There can be only one"
	 *
	 * Either this is the last one, or we don't actually need to do any
	 * 'work'. If it is the last *again*, we rely on
	 * WORK_STRUCT_PENDING_BIT.
	 */
	if (!atomic_add_unless(&sched_core_count, -1, 1))
		schedule_work(&_work);
}
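
/*
 * Usage sketch: each user of core scheduling holds one reference. For
 * example, a task being assigned a cookie (e.g. via prctl(PR_SCHED_CORE))
 * takes sched_core_get(), and when the last cookie goes away the matching
 * sched_core_put() flips the rq locks back to per-CPU mode.
 */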

#else /* !CONFIG_SCHED_CORE */

static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
static inline void sched_core_dequeue(struct rq *rq, struct task_struct *p) { }

#endif /* CONFIG_SCHED_CORE */

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;


/*
 * Serialization rules:
 *
 * Lock order:
 *
 *   p->pi_lock
 *     rq->lock
 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 *
 *  rq1->lock
 *    rq2->lock  where: rq1 < rq2
 *
 * Regular state:
 *
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock; it optionally removes the task from the runqueue and
 * always looks at the local rq data structures to find the most eligible task
 * to run next.
 *
 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 * the local CPU to avoid bouncing the runqueue state around [ see
 * ttwu_queue_wakelist() ]
 *
 * Task wakeup, specifically wakeups that involve migration, are horribly
 * complicated to avoid having to take two rq->locks.
 *
 * Special state:
 *
 * System-calls and anything external will use task_rq_lock() which acquires
 * both p->pi_lock and rq->lock. As a consequence the state they change is
 * stable while holding either lock:
 *
 *  - sched_setaffinity()/
 *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
 *  - set_user_nice():		p->se.load, p->*prio
 *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
 *				p->se.load, p->rt_priority,
 *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
 *  - sched_setnuma():		p->numa_preferred_nid
 *  - sched_move_task()/
 *    cpu_cgroup_fork():	p->sched_task_group
 *  - uclamp_update_active():	p->uclamp*
 *
 * p->state <- TASK_*:
 *
 *   is changed locklessly using set_current_state(), __set_current_state() or
 *   set_special_state(), see their respective comments, or by
 *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
 *   concurrent self.
 *
 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 *
 *   is set by activate_task() and cleared by deactivate_task(), under
 *   rq->lock. Non-zero indicates the task is runnable, the special
 *   ON_RQ_MIGRATING state is used for migration without holding both
 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 *
 * p->on_cpu <- { 0, 1 }:
 *
 *   is set by prepare_task() and cleared by finish_task() such that it will be
 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 *
 * [ The astute reader will observe that it is possible for two tasks on one
 *   CPU to have ->on_cpu = 1 at the same time. ]
 *
 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 *
 *  - Don't call set_task_cpu() on a blocked task:
 *
 *    We don't care what CPU we're not running on, this simplifies hotplug,
 *    the CPU assignment of blocked tasks isn't required to be valid.
 *
 *  - for try_to_wake_up(), called under p->pi_lock:
 *
 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 *
 *  - for migration called under rq->lock:
 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 *
 *    o move_queued_task()
 *    o detach_task()
 *
 *  - for migration called under double_rq_lock():
 *
 *    o __migrate_swap_task()
 *    o push_rt_task() / pull_rt_task()
 *    o push_dl_task() / pull_dl_task()
 *    o dl_task_offline_migration()
 *
 */

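/*
 * Illustrative sketch of the "Special state" rules above (a caller-side
 * pattern, not a new API):
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);	(takes p->pi_lock, then rq->lock)
 *	... change p->policy, p->*prio, p->sched_class, ...
 *	task_rq_unlock(rq, p, &rf);
 */
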
void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
{
	raw_spinlock_t *lock;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		raw_spin_lock_nested(&rq->__lock, subclass);
		/* preempt_count *MUST* be > 1 */
		preempt_enable_no_resched();
		return;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		raw_spin_lock_nested(lock, subclass);
		if (likely(lock == __rq_lockp(rq))) {
			/* preempt_count *MUST* be > 1 */
			preempt_enable_no_resched();
			return;
		}
		raw_spin_unlock(lock);
	}
}
EXPORT_SYMBOL_GPL(raw_spin_rq_lock_nested);

bool raw_spin_rq_trylock(struct rq *rq)
{
	raw_spinlock_t *lock;
	bool ret;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		ret = raw_spin_trylock(&rq->__lock);
		preempt_enable();
		return ret;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		ret = raw_spin_trylock(lock);
		if (!ret || (likely(lock == __rq_lockp(rq)))) {
			preempt_enable();
			return ret;
		}
		raw_spin_unlock(lock);
	}
}

void raw_spin_rq_unlock(struct rq *rq)
{
	raw_spin_unlock(rq_lockp(rq));
}
EXPORT_SYMBOL_GPL(raw_spin_rq_unlock);

#ifdef CONFIG_SMP
/*
 * double_rq_lock - safely lock two runqueues
 */
void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	lockdep_assert_irqs_disabled();

	if (rq_order_less(rq2, rq1))
		swap(rq1, rq2);

	raw_spin_rq_lock(rq1);
	if (__rq_lockp(rq1) != __rq_lockp(rq2))
		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);

	double_rq_clock_clear_update(rq1, rq2);
}
EXPORT_SYMBOL_GPL(double_rq_lock);
#endif

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(__task_rq_lock);

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		/*
		 * move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(task_rq_lock);

/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight misattribution of {soft,}irq
	 * time; a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_rq_held(rq);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}
EXPORT_SYMBOL_GPL(update_rq_clock);

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = rq->hrtick_time;

	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);

	if (rq == this_rq())
		__hrtick_restart(rq);
	else
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

#endif /* CONFIG_SMP */

static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
#endif
	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _old, _val = *_ptr;			\
									\
		for (;;) {						\
			_old = cmpxchg(_ptr, _val, _val | _mask);	\
			if (_old == _val)				\
				break;					\
			_val = _old;					\
		}							\
	_old;								\
})
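
/*
 * Example (illustrative): fetch_or() returns the value *before* the OR,
 * so a caller can both set a bit and learn whether another bit was
 * already set in a single atomic step:
 *
 *	was_polling = fetch_or(&ti->flags, _TIF_NEED_RESCHED) &
 *		      _TIF_POLLING_NRFLAG;
 *
 * which is exactly what set_nr_and_not_polling() below relies on.
 */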

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;
		val = old;
	}
	return true;
}

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	head->count++;
	return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold a reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending on whether or not the @task is
 * already queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		/* Task can safely be re-inserted now: */
		node = node->next;
		task->wake_q.next = NULL;
		task->wake_q_count = head->count;

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		task->wake_q_count = 0;
		put_task_struct(task);
	}
}
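
/*
 * Typical wake_q usage (a sketch of the caller-side pattern, cf. the
 * futex and rwsem code):
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&some_lock);
 *	wake_q_add(&wake_q, p);		(cheap, may run under the lock)
 *	raw_spin_unlock(&some_lock);
 *
 *	wake_up_q(&wake_q);		(actual wakeups, lock dropped)
 *
 * where 'some_lock' stands for whatever lock serializes the waker.
 */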

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_rq_held(rq);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}
EXPORT_SYMBOL_GPL(resched_curr);

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_rq_lock_irqsave(rq, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_rq_unlock_irqrestore(rq, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;
	const struct cpumask *hk_mask;
	bool done = false;

	trace_android_rvh_get_nohz_timer_target(&cpu, &done);
	if (done)
		return cpu;

	if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	hk_mask = housekeeping_cpumask(HK_FLAG_TIMER);

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
	cpu = default_cpu;
unlock:
	rcu_read_unlock();
	return cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;  /* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static void nohz_csd_func(void *info)
{
	struct rq *rq = info;
	int cpu = cpu_of(rq);
	unsigned int flags;

	/*
	 * Release the rq::nohz_csd.
	 */
	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
	WARN_ON(!(flags & NOHZ_KICK_MASK));

	rq->idle_balance = idle_cpu(cpu);
	if (rq->idle_balance && !need_resched()) {
		rq->nohz_idle_balance = flags;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there is more than one RR task, we need the tick to affect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there are no RR tasks, but there are FIFO tasks, we can skip the
	 * tick: there is no forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL, RR or FIFO tasks, there must only be CFS tasks
	 * left; if there's more than one we need the tick for involuntary
	 * preemption.
	 */
	if (rq->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
		      tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}
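
/*
 * Usage sketch (illustrative): walk the whole hierarchy from the root,
 * doing work on the way down only, with tg_nop() below as the no-op
 * visitor for the way up:
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, my_down_visitor, tg_nop, &ctx);
 *	rcu_read_unlock();
 *
 * where my_down_visitor is a caller-supplied tg_visitor returning 0 to
 * continue or non-zero to abort the walk.
 */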

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

static void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (task_has_idle_policy(p)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight
	 */
	if (update_load && p->sched_class == &fair_sched_class) {
		reweight_task(p, prio);
	} else {
		load->weight = scale_load(sched_prio_to_weight[prio]);
		load->inv_weight = sched_prio_to_wmult[prio];
	}
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/*
 * By default RT tasks run at the maximum performance point/capacity of the
 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
 * SCHED_CAPACITY_SCALE.
 *
 * This knob allows admins to change the default behavior when uclamp is being
 * used. In battery-powered devices, particularly, running at the maximum
 * capacity and frequency will increase energy consumption and shorten the
 * battery life.
 *
 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
 *
 * This knob will not override the system default sched_util_clamp_min defined
 * above.
 */
unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less than or equal to these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/*
 * This static key is used to reduce the uclamp overhead in the fast path. It
 * primarily disables the call to uclamp_rq_{inc, dec}() in
 * enqueue/dequeue_task().
 *
 * This allows users to continue to enable uclamp in their kernel config with
 * minimum uclamp overhead in the fast path.
 *
 * As soon as userspace modifies any of the uclamp knobs, the static key is
 * enabled, since we then have actual users of the uclamp functionality.
 *
 * The knobs that would enable this static key are:
 *
 *   * A task modifying its uclamp value with sched_setattr().
 *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
 *   * An admin modifying the cgroup cpu.uclamp.{min, max}
 */
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
EXPORT_SYMBOL_GPL(sched_uclamp_used);

/* Integer rounded range for each bucket */
#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

#define for_each_clamp_id(clamp_id) \
	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)

static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
{
	return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
}
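
/*
 * Worked example (assuming UCLAMP_BUCKETS == 5 and SCHED_CAPACITY_SCALE ==
 * 1024): UCLAMP_BUCKET_DELTA is DIV_ROUND_CLOSEST(1024, 5) == 205, so a
 * clamp value of 300 maps to bucket 300 / 205 == 1, while the maximum
 * value 1024 maps to min(1024 / 205, 4) == 4, the last bucket.
 */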
1328
uclamp_none(enum uclamp_id clamp_id)1329 static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
1330 {
1331 if (clamp_id == UCLAMP_MIN)
1332 return 0;
1333 return SCHED_CAPACITY_SCALE;
1334 }
1335
uclamp_se_set(struct uclamp_se * uc_se,unsigned int value,bool user_defined)1336 static inline void uclamp_se_set(struct uclamp_se *uc_se,
1337 unsigned int value, bool user_defined)
1338 {
1339 uc_se->value = value;
1340 uc_se->bucket_id = uclamp_bucket_id(value);
1341 uc_se->user_defined = user_defined;
1342 }
1343
1344 static inline unsigned int
uclamp_idle_value(struct rq * rq,enum uclamp_id clamp_id,unsigned int clamp_value)1345 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
1346 unsigned int clamp_value)
1347 {
1348 /*
1349 * Avoid blocked utilization pushing up the frequency when we go
1350 * idle (which drops the max-clamp) by retaining the last known
1351 * max-clamp.
1352 */
1353 if (clamp_id == UCLAMP_MAX) {
1354 rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
1355 return clamp_value;
1356 }
1357
1358 return uclamp_none(UCLAMP_MIN);
1359 }
1360
uclamp_idle_reset(struct rq * rq,enum uclamp_id clamp_id,unsigned int clamp_value)1361 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
1362 unsigned int clamp_value)
1363 {
1364 /* Reset max-clamp retention only on idle exit */
1365 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1366 return;
1367
1368 uclamp_rq_set(rq, clamp_id, clamp_value);
1369 }
1370
1371 static inline
uclamp_rq_max_value(struct rq * rq,enum uclamp_id clamp_id,unsigned int clamp_value)1372 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
1373 unsigned int clamp_value)
1374 {
1375 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
1376 int bucket_id = UCLAMP_BUCKETS - 1;
1377
1378 /*
1379 * Since both min and max clamps are max aggregated, find the
1380 * top most bucket with tasks in.
1381 */
1382 for ( ; bucket_id >= 0; bucket_id--) {
1383 if (!bucket[bucket_id].tasks)
1384 continue;
1385 return bucket[bucket_id].value;
1386 }
1387
1388 /* No tasks -- default clamp values */
1389 return uclamp_idle_value(rq, clamp_id, clamp_value);
1390 }
1391
__uclamp_update_util_min_rt_default(struct task_struct * p)1392 static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1393 {
1394 unsigned int default_util_min;
1395 struct uclamp_se *uc_se;
1396
1397 lockdep_assert_held(&p->pi_lock);
1398
1399 uc_se = &p->uclamp_req[UCLAMP_MIN];
1400
1401 /* Only sync if user didn't override the default */
1402 if (uc_se->user_defined)
1403 return;
1404
1405 default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1406 uclamp_se_set(uc_se, default_util_min, false);
1407 }
1408
uclamp_update_util_min_rt_default(struct task_struct * p)1409 static void uclamp_update_util_min_rt_default(struct task_struct *p)
1410 {
1411 struct rq_flags rf;
1412 struct rq *rq;
1413
1414 if (!rt_task(p))
1415 return;
1416
1417 /* Protect updates to p->uclamp_* */
1418 rq = task_rq_lock(p, &rf);
1419 __uclamp_update_util_min_rt_default(p);
1420 task_rq_unlock(rq, p, &rf);
1421 }
1422
uclamp_sync_util_min_rt_default(void)1423 static void uclamp_sync_util_min_rt_default(void)
1424 {
1425 struct task_struct *g, *p;
1426
1427 /*
1428 * copy_process() sysctl_uclamp
1429 * uclamp_min_rt = X;
1430 * write_lock(&tasklist_lock) read_lock(&tasklist_lock)
1431 * // link thread smp_mb__after_spinlock()
1432 * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock);
1433 * sched_post_fork() for_each_process_thread()
1434 * __uclamp_sync_rt() __uclamp_sync_rt()
1435 *
1436 * Ensures that either sched_post_fork() will observe the new
1437 * uclamp_min_rt or for_each_process_thread() will observe the new
1438 * task.
1439 */
1440 read_lock(&tasklist_lock);
1441 smp_mb__after_spinlock();
1442 read_unlock(&tasklist_lock);
1443
1444 rcu_read_lock();
1445 for_each_process_thread(g, p)
1446 uclamp_update_util_min_rt_default(p);
1447 rcu_read_unlock();
1448 }
1449
1450 static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct * p,enum uclamp_id clamp_id)1451 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
1452 {
1453 /* Copy by value as we could modify it */
1454 struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1455 #ifdef CONFIG_UCLAMP_TASK_GROUP
1456 unsigned int tg_min, tg_max, value;
1457
1458 /*
1459 * Tasks in autogroups or root task group will be
1460 * restricted by system defaults.
1461 */
1462 if (task_group_is_autogroup(task_group(p)))
1463 return uc_req;
1464 if (task_group(p) == &root_task_group)
1465 return uc_req;
1466
1467 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
1468 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
1469 value = uc_req.value;
1470 value = clamp(value, tg_min, tg_max);
1471 uclamp_se_set(&uc_req, value, false);
1472 #endif
1473
1474 return uc_req;
1475 }
1476
1477 /*
1478 * The effective clamp bucket index of a task depends on, by increasing
1479 * priority:
1480 * - the task specific clamp value, when explicitly requested from userspace
1481 * - the task group effective clamp value, for tasks not either in the root
1482 * group or in an autogroup
1483 * - the system default clamp value, defined by the sysadmin
1484 */
1485 static inline struct uclamp_se
uclamp_eff_get(struct task_struct * p,enum uclamp_id clamp_id)1486 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
1487 {
1488 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
1489 struct uclamp_se uc_max = uclamp_default[clamp_id];
1490 struct uclamp_se uc_eff;
1491 int ret = 0;
1492
1493 trace_android_rvh_uclamp_eff_get(p, clamp_id, &uc_max, &uc_eff, &ret);
1494 if (ret)
1495 return uc_eff;
1496
1497 /* System default restrictions always apply */
1498 if (unlikely(uc_req.value > uc_max.value))
1499 return uc_max;
1500
1501 return uc_req;
1502 }
1503
uclamp_eff_value(struct task_struct * p,enum uclamp_id clamp_id)1504 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
1505 {
1506 struct uclamp_se uc_eff;
1507
1508 /* Task currently refcounted: use back-annotated (effective) value */
1509 if (p->uclamp[clamp_id].active)
1510 return (unsigned long)p->uclamp[clamp_id].value;
1511
1512 uc_eff = uclamp_eff_get(p, clamp_id);
1513
1514 return (unsigned long)uc_eff.value;
1515 }
1516 EXPORT_SYMBOL_GPL(uclamp_eff_value);
1517
1518 /*
1519 * When a task is enqueued on a rq, the clamp bucket currently defined by the
1520 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1521 * updates the rq's clamp value if required.
1522 *
1523 * Tasks can have a task-specific value requested from user-space, track
1524 * within each bucket the maximum value for tasks refcounted in it.
1525 * This "local max aggregation" allows to track the exact "requested" value
1526 * for each bucket when all its RUNNABLE tasks require the same clamp.
1527 */
uclamp_rq_inc_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id)1528 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
1529 enum uclamp_id clamp_id)
1530 {
1531 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1532 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1533 struct uclamp_bucket *bucket;
1534
1535 lockdep_assert_rq_held(rq);
1536
1537 /* Update task effective clamp */
1538 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1539
1540 bucket = &uc_rq->bucket[uc_se->bucket_id];
1541 bucket->tasks++;
1542 uc_se->active = true;
1543
1544 uclamp_idle_reset(rq, clamp_id, uc_se->value);
1545
1546 /*
1547 * Local max aggregation: rq buckets always track the max
1548 * "requested" clamp value of its RUNNABLE tasks.
1549 */
1550 if (bucket->tasks == 1 || uc_se->value > bucket->value)
1551 bucket->value = uc_se->value;
1552
1553 if (uc_se->value > uclamp_rq_get(rq, clamp_id))
1554 uclamp_rq_set(rq, clamp_id, uc_se->value);
1555 }
1556
1557 /*
1558 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1559 * is released. If this is the last task reference counting the rq's max
1560 * active clamp value, then the rq's clamp value is updated.
1561 *
1562 * Both refcounted tasks and rq's cached clamp values are expected to be
1563 * always valid. If it's detected they are not, as defensive programming,
1564 * enforce the expected state and warn.
1565 */
uclamp_rq_dec_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id)1566 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1567 enum uclamp_id clamp_id)
1568 {
1569 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1570 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1571 struct uclamp_bucket *bucket;
1572 unsigned int bkt_clamp;
1573 unsigned int rq_clamp;
1574
1575 lockdep_assert_rq_held(rq);
1576
1577 /*
1578 * If sched_uclamp_used was enabled after task @p was enqueued,
1579 * we could end up with unbalanced call to uclamp_rq_dec_id().
1580 *
1581 * In this case the uc_se->active flag should be false since no uclamp
1582 * accounting was performed at enqueue time and we can just return
1583 * here.
1584 *
1585 * Need to be careful of the following enqueue/dequeue ordering
1586 * problem too
1587 *
1588 * enqueue(taskA)
1589 * // sched_uclamp_used gets enabled
1590 * enqueue(taskB)
1591 * dequeue(taskA)
1592 * // Must not decrement bucket->tasks here
1593 * dequeue(taskB)
1594 *
1595 * where we could end up with stale data in uc_se and
1596 * bucket[uc_se->bucket_id].
1597 *
1598 * The following check here eliminates the possibility of such race.
1599 */
1600 if (unlikely(!uc_se->active))
1601 return;
1602
1603 bucket = &uc_rq->bucket[uc_se->bucket_id];
1604
1605 SCHED_WARN_ON(!bucket->tasks);
1606 if (likely(bucket->tasks))
1607 bucket->tasks--;
1608
1609 uc_se->active = false;
1610
1611 /*
1612 * Keep "local max aggregation" simple and accept to (possibly)
1613 * overboost some RUNNABLE tasks in the same bucket.
1614 * The rq clamp bucket value is reset to its base value whenever
1615 * there are no more RUNNABLE tasks refcounting it.
1616 */
1617 if (likely(bucket->tasks))
1618 return;
1619
1620 rq_clamp = uclamp_rq_get(rq, clamp_id);
1621 /*
1622 * Defensive programming: this should never happen. If it happens,
1623 * e.g. due to future modification, warn and fixup the expected value.
1624 */
1625 SCHED_WARN_ON(bucket->value > rq_clamp);
1626 if (bucket->value >= rq_clamp) {
1627 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1628 uclamp_rq_set(rq, clamp_id, bkt_clamp);
1629 }
1630 }
1631
uclamp_rq_inc(struct rq * rq,struct task_struct * p)1632 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
1633 {
1634 enum uclamp_id clamp_id;
1635
1636 /*
1637 * Avoid any overhead until uclamp is actually used by the userspace.
1638 *
1639 * The condition is constructed such that a NOP is generated when
1640 * sched_uclamp_used is disabled.
1641 */
1642 if (!static_branch_unlikely(&sched_uclamp_used))
1643 return;
1644
1645 if (unlikely(!p->sched_class->uclamp_enabled))
1646 return;
1647
1648 for_each_clamp_id(clamp_id)
1649 uclamp_rq_inc_id(rq, p, clamp_id);
1650
1651 /* Reset clamp idle holding when there is one RUNNABLE task */
1652 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1653 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1654 }
1655
uclamp_rq_dec(struct rq * rq,struct task_struct * p)1656 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1657 {
1658 enum uclamp_id clamp_id;
1659
1660 /*
1661 * Avoid any overhead until uclamp is actually used by the userspace.
1662 *
1663 * The condition is constructed such that a NOP is generated when
1664 * sched_uclamp_used is disabled.
1665 */
1666 if (!static_branch_unlikely(&sched_uclamp_used))
1667 return;
1668
1669 if (unlikely(!p->sched_class->uclamp_enabled))
1670 return;
1671
1672 for_each_clamp_id(clamp_id)
1673 uclamp_rq_dec_id(rq, p, clamp_id);
1674 }
1675
uclamp_rq_reinc_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id)1676 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1677 enum uclamp_id clamp_id)
1678 {
1679 if (!p->uclamp[clamp_id].active)
1680 return;
1681
1682 uclamp_rq_dec_id(rq, p, clamp_id);
1683 uclamp_rq_inc_id(rq, p, clamp_id);
1684
1685 /*
1686 * Make sure to clear the idle flag if we've transiently reached 0
1687 * active tasks on rq.
1688 */
1689 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1690 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1691 }
1692
1693 static inline void
uclamp_update_active(struct task_struct * p)1694 uclamp_update_active(struct task_struct *p)
1695 {
1696 enum uclamp_id clamp_id;
1697 struct rq_flags rf;
1698 struct rq *rq;
1699
1700 /*
1701 * Lock the task and the rq where the task is (or was) queued.
1702 *
1703 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1704 * price to pay to safely serialize util_{min,max} updates with
1705 * enqueues, dequeues and migration operations.
1706 * This is the same locking schema used by __set_cpus_allowed_ptr().
1707 */
1708 rq = task_rq_lock(p, &rf);
1709
1710 /*
1711 * Setting the clamp bucket is serialized by task_rq_lock().
1712 * If the task is not yet RUNNABLE and its task_struct is not
1713 * affecting a valid clamp bucket, the next time it's enqueued,
1714 * it will already see the updated clamp bucket value.
1715 */
1716 for_each_clamp_id(clamp_id)
1717 uclamp_rq_reinc_id(rq, p, clamp_id);
1718
1719 task_rq_unlock(rq, p, &rf);
1720 }
1721
1722 #ifdef CONFIG_UCLAMP_TASK_GROUP
1723 static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state * css)1724 uclamp_update_active_tasks(struct cgroup_subsys_state *css)
1725 {
1726 struct css_task_iter it;
1727 struct task_struct *p;
1728
1729 css_task_iter_start(css, 0, &it);
1730 while ((p = css_task_iter_next(&it)))
1731 uclamp_update_active(p);
1732 css_task_iter_end(&it);
1733 }
1734
1735 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
uclamp_update_root_tg(void)1736 static void uclamp_update_root_tg(void)
1737 {
1738 struct task_group *tg = &root_task_group;
1739
1740 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1741 sysctl_sched_uclamp_util_min, false);
1742 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1743 sysctl_sched_uclamp_util_max, false);
1744
1745 rcu_read_lock();
1746 cpu_util_update_eff(&root_task_group.css);
1747 rcu_read_unlock();
1748 }
1749 #else
uclamp_update_root_tg(void)1750 static void uclamp_update_root_tg(void) { }
1751 #endif
1752
sysctl_sched_uclamp_handler(struct ctl_table * table,int write,void * buffer,size_t * lenp,loff_t * ppos)1753 int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
1754 void *buffer, size_t *lenp, loff_t *ppos)
1755 {
1756 bool update_root_tg = false;
1757 int old_min, old_max, old_min_rt;
1758 int result;
1759
1760 mutex_lock(&uclamp_mutex);
1761 old_min = sysctl_sched_uclamp_util_min;
1762 old_max = sysctl_sched_uclamp_util_max;
1763 old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
1764
1765 result = proc_dointvec(table, write, buffer, lenp, ppos);
1766 if (result)
1767 goto undo;
1768 if (!write)
1769 goto done;
1770
1771 if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
1772 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
1773 sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1774
1775 result = -EINVAL;
1776 goto undo;
1777 }
1778
1779 if (old_min != sysctl_sched_uclamp_util_min) {
1780 uclamp_se_set(&uclamp_default[UCLAMP_MIN],
1781 sysctl_sched_uclamp_util_min, false);
1782 update_root_tg = true;
1783 }
1784 if (old_max != sysctl_sched_uclamp_util_max) {
1785 uclamp_se_set(&uclamp_default[UCLAMP_MAX],
1786 sysctl_sched_uclamp_util_max, false);
1787 update_root_tg = true;
1788 }
1789
1790 if (update_root_tg) {
1791 static_branch_enable(&sched_uclamp_used);
1792 uclamp_update_root_tg();
1793 }
1794
1795 if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1796 static_branch_enable(&sched_uclamp_used);
1797 uclamp_sync_util_min_rt_default();
1798 }
1799
1800 /*
1801 * We update all RUNNABLE tasks only when task groups are in use.
1802 * Otherwise, keep it simple and do just a lazy update at each next
1803 * task enqueue time.
1804 */
1805
1806 goto done;
1807
1808 undo:
1809 sysctl_sched_uclamp_util_min = old_min;
1810 sysctl_sched_uclamp_util_max = old_max;
1811 sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
1812 done:
1813 mutex_unlock(&uclamp_mutex);
1814
1815 return result;
1816 }
1817
uclamp_validate(struct task_struct * p,const struct sched_attr * attr)1818 static int uclamp_validate(struct task_struct *p,
1819 const struct sched_attr *attr)
1820 {
1821 int util_min = p->uclamp_req[UCLAMP_MIN].value;
1822 int util_max = p->uclamp_req[UCLAMP_MAX].value;
1823
1824 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
1825 util_min = attr->sched_util_min;
1826
1827 if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
1828 return -EINVAL;
1829 }
1830
1831 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
1832 util_max = attr->sched_util_max;
1833
1834 if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
1835 return -EINVAL;
1836 }
1837
1838 if (util_min != -1 && util_max != -1 && util_min > util_max)
1839 return -EINVAL;
1840
1841 /*
1842 * We have valid uclamp attributes; make sure uclamp is enabled.
1843 *
1844 * We need to do that here, because enabling static branches is a
1845 * blocking operation which obviously cannot be done while holding
1846 * scheduler locks.
1847 */
1848 static_branch_enable(&sched_uclamp_used);
1849
1850 return 0;
1851 }
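
/*
 * Usage sketch (illustrative, not part of this file): the attributes
 * validated above arrive via sched_setattr(2). Userspace could request
 * a ~40% utilization floor (410 out of 1024) for the current task
 * roughly like so; glibc has no wrapper, hence the raw syscall, and
 * error handling is elided:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_flags	= SCHED_FLAG_KEEP_ALL |
 *				  SCHED_FLAG_UTIL_CLAMP_MIN,
 *		.sched_util_min	= 410,
 *	};
 *
 *	syscall(SYS_sched_setattr, 0, &attr, 0);
 *
 * Passing -1 in sched_util_{min,max} instead resets the respective
 * clamp to its non-user-defined default, see uclamp_reset() below.
 */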
1852
1853 static bool uclamp_reset(const struct sched_attr *attr,
1854 enum uclamp_id clamp_id,
1855 struct uclamp_se *uc_se)
1856 {
1857 /* Reset on sched class change for a non user-defined clamp value. */
1858 if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
1859 !uc_se->user_defined)
1860 return true;
1861
1862 /* Reset on sched_util_{min,max} == -1. */
1863 if (clamp_id == UCLAMP_MIN &&
1864 attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
1865 attr->sched_util_min == -1) {
1866 return true;
1867 }
1868
1869 if (clamp_id == UCLAMP_MAX &&
1870 attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
1871 attr->sched_util_max == -1) {
1872 return true;
1873 }
1874
1875 return false;
1876 }
1877
1878 static void __setscheduler_uclamp(struct task_struct *p,
1879 const struct sched_attr *attr)
1880 {
1881 enum uclamp_id clamp_id;
1882
1883 for_each_clamp_id(clamp_id) {
1884 struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
1885 unsigned int value;
1886
1887 if (!uclamp_reset(attr, clamp_id, uc_se))
1888 continue;
1889
1890 /*
1891 * RT tasks by default have a 100% boost value that can be modified
1892 * at runtime.
1893 */
1894 if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
1895 value = sysctl_sched_uclamp_util_min_rt_default;
1896 else
1897 value = uclamp_none(clamp_id);
1898
1899 uclamp_se_set(uc_se, value, false);
1900
1901 }
1902
1903 if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
1904 return;
1905
1906 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
1907 attr->sched_util_min != -1) {
1908 uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
1909 attr->sched_util_min, true);
1910 trace_android_vh_setscheduler_uclamp(p, UCLAMP_MIN, attr->sched_util_min);
1911 }
1912
1913 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
1914 attr->sched_util_max != -1) {
1915 uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
1916 attr->sched_util_max, true);
1917 trace_android_vh_setscheduler_uclamp(p, UCLAMP_MAX, attr->sched_util_max);
1918 }
1919 }
1920
1921 static void uclamp_fork(struct task_struct *p)
1922 {
1923 enum uclamp_id clamp_id;
1924
1925 /*
1926 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1927 * as the task is still at its early fork stages.
1928 */
1929 for_each_clamp_id(clamp_id)
1930 p->uclamp[clamp_id].active = false;
1931
1932 if (likely(!p->sched_reset_on_fork))
1933 return;
1934
1935 for_each_clamp_id(clamp_id) {
1936 uclamp_se_set(&p->uclamp_req[clamp_id],
1937 uclamp_none(clamp_id), false);
1938 }
1939 }
1940
1941 static void uclamp_post_fork(struct task_struct *p)
1942 {
1943 uclamp_update_util_min_rt_default(p);
1944 }
1945
1946 static void __init init_uclamp_rq(struct rq *rq)
1947 {
1948 enum uclamp_id clamp_id;
1949 struct uclamp_rq *uc_rq = rq->uclamp;
1950
1951 for_each_clamp_id(clamp_id) {
1952 uc_rq[clamp_id] = (struct uclamp_rq) {
1953 .value = uclamp_none(clamp_id)
1954 };
1955 }
1956
1957 rq->uclamp_flags = UCLAMP_FLAG_IDLE;
1958 }
1959
1960 static void __init init_uclamp(void)
1961 {
1962 struct uclamp_se uc_max = {};
1963 enum uclamp_id clamp_id;
1964 int cpu;
1965
1966 for_each_possible_cpu(cpu)
1967 init_uclamp_rq(cpu_rq(cpu));
1968
1969 for_each_clamp_id(clamp_id) {
1970 uclamp_se_set(&init_task.uclamp_req[clamp_id],
1971 uclamp_none(clamp_id), false);
1972 }
1973
1974 /* System defaults allow max clamp values for both indexes */
1975 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
1976 for_each_clamp_id(clamp_id) {
1977 uclamp_default[clamp_id] = uc_max;
1978 #ifdef CONFIG_UCLAMP_TASK_GROUP
1979 root_task_group.uclamp_req[clamp_id] = uc_max;
1980 root_task_group.uclamp[clamp_id] = uc_max;
1981 #endif
1982 }
1983 }
1984
1985 #else /* CONFIG_UCLAMP_TASK */
1986 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
1987 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
1988 static inline int uclamp_validate(struct task_struct *p,
1989 const struct sched_attr *attr)
1990 {
1991 return -EOPNOTSUPP;
1992 }
1993 static void __setscheduler_uclamp(struct task_struct *p,
1994 const struct sched_attr *attr) { }
1995 static inline void uclamp_fork(struct task_struct *p) { }
1996 static inline void uclamp_post_fork(struct task_struct *p) { }
1997 static inline void init_uclamp(void) { }
1998 #endif /* CONFIG_UCLAMP_TASK */
1999
2000 bool sched_task_on_rq(struct task_struct *p)
2001 {
2002 return task_on_rq_queued(p);
2003 }
2004
2005 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2006 {
2007 if (!(flags & ENQUEUE_NOCLOCK))
2008 update_rq_clock(rq);
2009
2010 if (!(flags & ENQUEUE_RESTORE)) {
2011 sched_info_enqueue(rq, p);
2012 psi_enqueue(p, flags & ENQUEUE_WAKEUP);
2013 }
2014
2015 uclamp_rq_inc(rq, p);
2016 trace_android_rvh_enqueue_task(rq, p, flags);
2017 p->sched_class->enqueue_task(rq, p, flags);
2018 trace_android_rvh_after_enqueue_task(rq, p, flags);
2019
2020 if (sched_core_enabled(rq))
2021 sched_core_enqueue(rq, p);
2022 }
2023
2024 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
2025 {
2026 if (sched_core_enabled(rq))
2027 sched_core_dequeue(rq, p);
2028
2029 if (!(flags & DEQUEUE_NOCLOCK))
2030 update_rq_clock(rq);
2031
2032 if (!(flags & DEQUEUE_SAVE)) {
2033 sched_info_dequeue(rq, p);
2034 psi_dequeue(p, flags & DEQUEUE_SLEEP);
2035 }
2036
2037 uclamp_rq_dec(rq, p);
2038 trace_android_rvh_dequeue_task(rq, p, flags);
2039 p->sched_class->dequeue_task(rq, p, flags);
2040 trace_android_rvh_after_dequeue_task(rq, p, flags);
2041 }
2042
2043 void activate_task(struct rq *rq, struct task_struct *p, int flags)
2044 {
2045 if (task_on_rq_migrating(p))
2046 flags |= ENQUEUE_MIGRATED;
2047
2048 enqueue_task(rq, p, flags);
2049
2050 p->on_rq = TASK_ON_RQ_QUEUED;
2051 }
2052 EXPORT_SYMBOL_GPL(activate_task);
2053
2054 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
2055 {
2056 p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
2057
2058 dequeue_task(rq, p, flags);
2059 }
2060 EXPORT_SYMBOL_GPL(deactivate_task);
2061
2062 static inline int __normal_prio(int policy, int rt_prio, int nice)
2063 {
2064 int prio;
2065
2066 if (dl_policy(policy))
2067 prio = MAX_DL_PRIO - 1;
2068 else if (rt_policy(policy))
2069 prio = MAX_RT_PRIO - 1 - rt_prio;
2070 else
2071 prio = NICE_TO_PRIO(nice);
2072
2073 return prio;
2074 }
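
/*
 * Worked examples of the mapping above, given MAX_DL_PRIO == 0,
 * MAX_RT_PRIO == 100 and NICE_TO_PRIO(nice) == nice + 120:
 *
 *	SCHED_DEADLINE			-> prio -1
 *	SCHED_FIFO, rt_priority 99	-> prio 0	(highest RT)
 *	SCHED_FIFO, rt_priority 1	-> prio 98	(lowest RT)
 *	SCHED_NORMAL, nice -20		-> prio 100
 *	SCHED_NORMAL, nice 0		-> prio 120
 *	SCHED_NORMAL, nice 19		-> prio 139
 *
 * Lower kernel prio means higher effective priority throughout.
 */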
2075
2076 /*
2077 * Calculate the expected normal priority: i.e. priority
2078 * without taking RT-inheritance into account. Might be
2079 * boosted by interactivity modifiers. Changes upon fork,
2080 * setprio syscalls, and whenever the interactivity
2081 * estimator recalculates.
2082 */
2083 static inline int normal_prio(struct task_struct *p)
2084 {
2085 return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
2086 }
2087
2088 /*
2089 * Calculate the current priority, i.e. the priority
2090 * taken into account by the scheduler. This value might
2091 * be boosted by RT tasks, or might be boosted by
2092 * interactivity modifiers. Will be RT if the task got
2093 * RT-boosted. If not then it returns p->normal_prio.
2094 */
2095 static int effective_prio(struct task_struct *p)
2096 {
2097 p->normal_prio = normal_prio(p);
2098 /*
2099 * If we are an RT task or were boosted to RT priority,
2100 * keep the priority unchanged. Otherwise, update priority
2101 * to the normal priority:
2102 */
2103 if (!rt_prio(p->prio))
2104 return p->normal_prio;
2105 return p->prio;
2106 }
2107
2108 /**
2109 * task_curr - is this task currently executing on a CPU?
2110 * @p: the task in question.
2111 *
2112 * Return: 1 if the task is currently executing. 0 otherwise.
2113 */
2114 inline int task_curr(const struct task_struct *p)
2115 {
2116 return cpu_curr(task_cpu(p)) == p;
2117 }
2118
2119 /*
2120 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2121 * use the balance_callback list if you want balancing.
2122 *
2123 * this means any call to check_class_changed() must be followed by a call to
2124 * balance_callback().
2125 */
2126 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
2127 const struct sched_class *prev_class,
2128 int oldprio)
2129 {
2130 if (prev_class != p->sched_class) {
2131 if (prev_class->switched_from)
2132 prev_class->switched_from(rq, p);
2133
2134 p->sched_class->switched_to(rq, p);
2135 } else if (oldprio != p->prio || dl_task(p))
2136 p->sched_class->prio_changed(rq, p, oldprio);
2137 }
2138
2139 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
2140 {
2141 if (p->sched_class == rq->curr->sched_class)
2142 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
2143 else if (p->sched_class > rq->curr->sched_class)
2144 resched_curr(rq);
2145
2146 /*
2147 * A queue event has occurred, and we're going to schedule. In
2148 * this case, we can skip a useless back-to-back clock update.
2149 */
2150 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
2151 rq_clock_skip_update(rq);
2152 }
2153 EXPORT_SYMBOL_GPL(check_preempt_curr);
2154
2155 #ifdef CONFIG_SMP
2156
2157 static void
2158 __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
2159
2160 static int __set_cpus_allowed_ptr(struct task_struct *p,
2161 const struct cpumask *new_mask,
2162 u32 flags);
2163
2164 static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2165 {
2166 if (likely(!p->migration_disabled))
2167 return;
2168
2169 if (p->cpus_ptr != &p->cpus_mask)
2170 return;
2171
2172 /*
2173 * Violates locking rules! see comment in __do_set_cpus_allowed().
2174 */
2175 __do_set_cpus_allowed(p, cpumask_of(rq->cpu), SCA_MIGRATE_DISABLE);
2176 }
2177
2178 void migrate_disable(void)
2179 {
2180 struct task_struct *p = current;
2181
2182 if (p->migration_disabled) {
2183 p->migration_disabled++;
2184 return;
2185 }
2186
2187 preempt_disable();
2188 this_rq()->nr_pinned++;
2189 p->migration_disabled = 1;
2190 preempt_enable();
2191 }
2192 EXPORT_SYMBOL_GPL(migrate_disable);
2193
2194 void migrate_enable(void)
2195 {
2196 struct task_struct *p = current;
2197
2198 if (p->migration_disabled > 1) {
2199 p->migration_disabled--;
2200 return;
2201 }
2202
2203 /*
2204 * Ensure stop_task runs either before or after this, and that
2205 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
2206 */
2207 preempt_disable();
2208 if (p->cpus_ptr != &p->cpus_mask)
2209 __set_cpus_allowed_ptr(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
2210 /*
2211 * Mustn't clear migration_disabled() until cpus_ptr points back at the
2212 * regular cpus_mask, otherwise things that race (eg.
2213 * select_fallback_rq) get confused.
2214 */
2215 barrier();
2216 p->migration_disabled = 0;
2217 this_rq()->nr_pinned--;
2218 preempt_enable();
2219 }
2220 EXPORT_SYMBOL_GPL(migrate_enable);
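
/*
 * Usage sketch (illustrative): unlike a preempt_disable() section, a
 * migrate_disable() section may block and be preempted; the scheduler
 * merely refuses to move the task to another CPU until the matching
 * migrate_enable(). A caller touching per-CPU state that might sleep
 * could look like this (do_something_percpu() is hypothetical):
 *
 *	migrate_disable();
 *	cpu = smp_processor_id();
 *	do_something_percpu(cpu);
 *	migrate_enable();
 *
 * The CPU number stays stable across the section because the task
 * cannot be migrated until migrate_enable() drops the count to zero.
 */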
2221
2222 static inline bool rq_has_pinned_tasks(struct rq *rq)
2223 {
2224 return rq->nr_pinned;
2225 }
2226
2227 /*
2228 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2229 * __set_cpus_allowed_ptr() and select_fallback_rq().
2230 */
2231 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2232 {
2233 bool allowed = true;
2234
2235 /* When not in the task's cpumask, no point in looking further. */
2236 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
2237 return false;
2238
2239 /* migrate_disabled() must be allowed to finish. */
2240 if (is_migration_disabled(p))
2241 return cpu_online(cpu);
2242
2243 /* Consult the vendor hook, which may veto this CPU for the remaining cases. */
2244 trace_android_rvh_is_cpu_allowed(p, cpu, &allowed);
2245
2246 /* Non-kernel threads are not allowed during either online or offline. */
2247 if (!(p->flags & PF_KTHREAD))
2248 return cpu_active(cpu) && task_cpu_possible(cpu, p) && allowed;
2249
2250 /* KTHREAD_IS_PER_CPU is always allowed. */
2251 if (kthread_is_per_cpu(p))
2252 return cpu_online(cpu);
2253
2254 if (!allowed)
2255 return false;
2256
2257 /* Regular kernel threads don't get to stay during offline. */
2258 if (cpu_dying(cpu))
2259 return false;
2260
2261 /* But are allowed during online. */
2262 return cpu_online(cpu);
2263 }
2264
2265 /*
2266 * This is how migration works:
2267 *
2268 * 1) we invoke migration_cpu_stop() on the target CPU using
2269 * stop_one_cpu().
2270 * 2) stopper starts to run (implicitly forcing the migrated thread
2271 * off the CPU)
2272 * 3) it checks whether the migrated task is still in the wrong runqueue.
2273 * 4) if it's in the wrong runqueue then the migration thread removes
2274 * it and puts it into the right queue.
2275 * 5) stopper completes and stop_one_cpu() returns and the migration
2276 * is done.
2277 */
2278
2279 /*
2280 * move_queued_task - move a queued task to new rq.
2281 *
2282 * Returns (locked) new rq. Old rq's lock is released.
2283 */
2284 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2285 struct task_struct *p, int new_cpu)
2286 {
2287 int detached = 0;
2288
2289 lockdep_assert_rq_held(rq);
2290
2291 /*
2292 * The vendor hook may drop the lock temporarily, so
2293 * pass the rq flags to unpin lock. We expect the
2294 * rq lock to be held after return.
2295 */
2296 trace_android_rvh_migrate_queued_task(rq, rf, p, new_cpu, &detached);
2297 if (detached)
2298 goto attach;
2299
2300 deactivate_task(rq, p, DEQUEUE_NOCLOCK);
2301 set_task_cpu(p, new_cpu);
2302
2303 attach:
2304 rq_unlock(rq, rf);
2305 rq = cpu_rq(new_cpu);
2306
2307 rq_lock(rq, rf);
2308 BUG_ON(task_cpu(p) != new_cpu);
2309 activate_task(rq, p, 0);
2310 check_preempt_curr(rq, p, 0);
2311
2312 return rq;
2313 }
2314
2315 struct migration_arg {
2316 struct task_struct *task;
2317 int dest_cpu;
2318 struct set_affinity_pending *pending;
2319 };
2320
2321 /*
2322 * @refs: number of wait_for_completion()
2323 * @stop_pending: is @stop_work in use
2324 */
2325 struct set_affinity_pending {
2326 refcount_t refs;
2327 unsigned int stop_pending;
2328 struct completion done;
2329 struct cpu_stop_work stop_work;
2330 struct migration_arg arg;
2331 };
2332
2333 /*
2334 * Move (not current) task off this CPU, onto the destination CPU. We're doing
2335 * this because either it can't run here any more (set_cpus_allowed()
2336 * away from this CPU, or CPU going down), or because we're
2337 * attempting to rebalance this task on exec (sched_exec).
2338 *
2339 * So we race with normal scheduler movements, but that's OK, as long
2340 * as the task is no longer on this CPU.
2341 */
2342 struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2343 struct task_struct *p, int dest_cpu)
2344 {
2345 /* Affinity changed (again). */
2346 if (!is_cpu_allowed(p, dest_cpu))
2347 return rq;
2348
2349 update_rq_clock(rq);
2350 rq = move_queued_task(rq, rf, p, dest_cpu);
2351
2352 return rq;
2353 }
2354 EXPORT_SYMBOL_GPL(__migrate_task);
2355
2356 /*
2357 * migration_cpu_stop - this will be executed by a highprio stopper thread
2358 * and performs thread migration by bumping thread off CPU then
2359 * 'pushing' onto another runqueue.
2360 */
2361 static int migration_cpu_stop(void *data)
2362 {
2363 struct migration_arg *arg = data;
2364 struct set_affinity_pending *pending = arg->pending;
2365 struct task_struct *p = arg->task;
2366 struct rq *rq = this_rq();
2367 bool complete = false;
2368 struct rq_flags rf;
2369
2370 /*
2371 * The original target CPU might have gone down and we might
2372 * be on another CPU but it doesn't matter.
2373 */
2374 local_irq_save(rf.flags);
2375 /*
2376 * We need to explicitly wake pending tasks before running
2377 * __migrate_task() such that we will not miss enforcing cpus_ptr
2378 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2379 */
2380 flush_smp_call_function_from_idle();
2381
2382 raw_spin_lock(&p->pi_lock);
2383 rq_lock(rq, &rf);
2384
2385 /*
2386 * If we were passed a pending, then ->stop_pending was set, thus
2387 * p->migration_pending must have remained stable.
2388 */
2389 WARN_ON_ONCE(pending && pending != p->migration_pending);
2390
2391 /*
2392 * If task_rq(p) != rq, it cannot be migrated here, because we're
2393 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2394 * we're holding p->pi_lock.
2395 */
2396 if (task_rq(p) == rq) {
2397 if (is_migration_disabled(p))
2398 goto out;
2399
2400 if (pending) {
2401 p->migration_pending = NULL;
2402 complete = true;
2403
2404 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2405 goto out;
2406 }
2407
2408 if (task_on_rq_queued(p))
2409 rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2410 else
2411 p->wake_cpu = arg->dest_cpu;
2412
2413 /*
2414 * XXX __migrate_task() can fail, at which point we might end
2415 * up running on a dodgy CPU, AFAICT this can only happen
2416 * during CPU hotplug, at which point we'll get pushed out
2417 * anyway, so it's probably not a big deal.
2418 */
2419
2420 } else if (pending) {
2421 /*
2422 * This happens when we get migrated between migrate_enable()'s
2423 * preempt_enable() and scheduling the stopper task. At that
2424 * point we're a regular task again and not current anymore.
2425 *
2426 * A !PREEMPT kernel has a giant hole here, which makes it far
2427 * more likely.
2428 */
2429
2430 /*
2431 * The task moved before the stopper got to run. We're holding
2432 * ->pi_lock, so the allowed mask is stable - if it got
2433 * somewhere allowed, we're done.
2434 */
2435 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2436 p->migration_pending = NULL;
2437 complete = true;
2438 goto out;
2439 }
2440
2441 /*
2442 * When migrate_enable() hits a rq mis-match we can't reliably
2443 * determine is_migration_disabled() and so have to chase after
2444 * it.
2445 */
2446 WARN_ON_ONCE(!pending->stop_pending);
2447 preempt_disable();
2448 task_rq_unlock(rq, p, &rf);
2449 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2450 &pending->arg, &pending->stop_work);
2451 preempt_enable();
2452 return 0;
2453 }
2454 out:
2455 if (pending)
2456 pending->stop_pending = false;
2457 task_rq_unlock(rq, p, &rf);
2458
2459 if (complete)
2460 complete_all(&pending->done);
2461
2462 return 0;
2463 }
2464
2465 int push_cpu_stop(void *arg)
2466 {
2467 struct rq *lowest_rq = NULL, *rq = this_rq();
2468 struct task_struct *p = arg;
2469
2470 raw_spin_lock_irq(&p->pi_lock);
2471 raw_spin_rq_lock(rq);
2472
2473 if (task_rq(p) != rq)
2474 goto out_unlock;
2475
2476 if (is_migration_disabled(p)) {
2477 p->migration_flags |= MDF_PUSH;
2478 goto out_unlock;
2479 }
2480
2481 p->migration_flags &= ~MDF_PUSH;
2482
2483 if (p->sched_class->find_lock_rq)
2484 lowest_rq = p->sched_class->find_lock_rq(p, rq);
2485
2486 if (!lowest_rq)
2487 goto out_unlock;
2488
2489 // XXX validate p is still the highest prio task
2490 if (task_rq(p) == rq) {
2491 deactivate_task(rq, p, 0);
2492 set_task_cpu(p, lowest_rq->cpu);
2493 activate_task(lowest_rq, p, 0);
2494 resched_curr(lowest_rq);
2495 }
2496
2497 double_unlock_balance(rq, lowest_rq);
2498
2499 out_unlock:
2500 rq->push_busy = false;
2501 raw_spin_rq_unlock(rq);
2502 raw_spin_unlock_irq(&p->pi_lock);
2503
2504 put_task_struct(p);
2505 return 0;
2506 }
2507
2508 /*
2509 * sched_class::set_cpus_allowed must do the below, but is not required to
2510 * actually call this function.
2511 */
2512 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
2513 {
2514 if (flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2515 p->cpus_ptr = new_mask;
2516 return;
2517 }
2518
2519 cpumask_copy(&p->cpus_mask, new_mask);
2520 p->nr_cpus_allowed = cpumask_weight(new_mask);
2521 trace_android_rvh_set_cpus_allowed_comm(p, new_mask);
2522 }
2523
2524 static void
2525 __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
2526 {
2527 struct rq *rq = task_rq(p);
2528 bool queued, running;
2529
2530 /*
2531 * This here violates the locking rules for affinity, since we're only
2532 * supposed to change these variables while holding both rq->lock and
2533 * p->pi_lock.
2534 *
2535 * HOWEVER, it magically works, because ttwu() is the only code that
2536 * accesses these variables under p->pi_lock and only does so after
2537 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2538 * before finish_task().
2539 *
2540 * XXX do further audits, this smells like something putrid.
2541 */
2542 if (flags & SCA_MIGRATE_DISABLE)
2543 SCHED_WARN_ON(!p->on_cpu);
2544 else
2545 lockdep_assert_held(&p->pi_lock);
2546
2547 queued = task_on_rq_queued(p);
2548 running = task_current(rq, p);
2549
2550 if (queued) {
2551 /*
2552 * Because __kthread_bind() calls this on blocked tasks without
2553 * holding rq->lock.
2554 */
2555 lockdep_assert_rq_held(rq);
2556 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
2557 }
2558 if (running)
2559 put_prev_task(rq, p);
2560
2561 p->sched_class->set_cpus_allowed(p, new_mask, flags);
2562
2563 if (queued)
2564 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
2565 if (running)
2566 set_next_task(rq, p);
2567 }
2568
2569 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
2570 {
2571 __do_set_cpus_allowed(p, new_mask, 0);
2572 }
2573
2574 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2575 int node)
2576 {
2577 cpumask_t *user_mask;
2578 unsigned long flags;
2579
2580 /*
2581 * Always clear dst->user_cpus_ptr first, as src's and dst's
2582 * user_cpus_ptr may differ by now due to racing.
2583 */
2584 dst->user_cpus_ptr = NULL;
2585
2586 /*
2587 * This check is racy and losing the race is a valid situation.
2588 * It is not worth the extra overhead of taking the pi_lock on
2589 * every fork/clone.
2590 */
2591 if (data_race(!src->user_cpus_ptr))
2592 return 0;
2593
2594 user_mask = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
2595 if (!user_mask)
2596 return -ENOMEM;
2597
2598 /*
2599 * Use pi_lock to protect content of user_cpus_ptr
2600 *
2601 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2602 * do_set_cpus_allowed().
2603 */
2604 raw_spin_lock_irqsave(&src->pi_lock, flags);
2605 if (src->user_cpus_ptr) {
2606 swap(dst->user_cpus_ptr, user_mask);
2607 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2608 }
2609 raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2610
2611 if (unlikely(user_mask))
2612 kfree(user_mask);
2613
2614 return 0;
2615 }
2616
2617 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2618 {
2619 struct cpumask *user_mask = NULL;
2620
2621 swap(p->user_cpus_ptr, user_mask);
2622
2623 return user_mask;
2624 }
2625
2626 void release_user_cpus_ptr(struct task_struct *p)
2627 {
2628 kfree(clear_user_cpus_ptr(p));
2629 }
2630
2631 /*
2632 * This function is wildly self concurrent; here be dragons.
2633 *
2634 *
2635 * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2636 * designated task is enqueued on an allowed CPU. If that task is currently
2637 * running, we have to kick it out using the CPU stopper.
2638 *
2639 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2640 * Consider:
2641 *
2642 * Initial conditions: P0->cpus_mask = [0, 1]
2643 *
2644 * P0@CPU0 P1
2645 *
2646 * migrate_disable();
2647 * <preempted>
2648 * set_cpus_allowed_ptr(P0, [1]);
2649 *
2650 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2651 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2652 * This means we need the following scheme:
2653 *
2654 * P0@CPU0 P1
2655 *
2656 * migrate_disable();
2657 * <preempted>
2658 * set_cpus_allowed_ptr(P0, [1]);
2659 * <blocks>
2660 * <resumes>
2661 * migrate_enable();
2662 * __set_cpus_allowed_ptr();
2663 * <wakes local stopper>
2664 * `--> <woken on migration completion>
2665 *
2666 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2667 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2668 * task p are serialized by p->pi_lock, which we can leverage: the one that
2669 * should come into effect at the end of the Migrate-Disable region is the last
2670 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2671 * but we still need to properly signal those waiting tasks at the appropriate
2672 * moment.
2673 *
2674 * This is implemented using struct set_affinity_pending. The first
2675 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2676 * setup an instance of that struct and install it on the targeted task_struct.
2677 * Any and all further callers will reuse that instance. Those then wait for
2678 * a completion signaled at the tail of the CPU stopper callback (1), triggered
2679 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2680 *
2681 *
2682 * (1) In the cases covered above. There is one more where the completion is
2683 * signaled within affine_move_task() itself: when a subsequent affinity request
2684 * occurs after the stopper bailed out due to the targeted task still being
2685 * Migrate-Disable. Consider:
2686 *
2687 * Initial conditions: P0->cpus_mask = [0, 1]
2688 *
2689 * CPU0 P1 P2
2690 * <P0>
2691 * migrate_disable();
2692 * <preempted>
2693 * set_cpus_allowed_ptr(P0, [1]);
2694 * <blocks>
2695 * <migration/0>
2696 * migration_cpu_stop()
2697 * is_migration_disabled()
2698 * <bails>
2699 * set_cpus_allowed_ptr(P0, [0, 1]);
2700 * <signal completion>
2701 * <awakes>
2702 *
2703 * Note that the above is safe vs a concurrent migrate_enable(), as any
2704 * pending affinity completion is preceded by an uninstallation of
2705 * p->migration_pending done with p->pi_lock held.
2706 */
2707 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2708 int dest_cpu, unsigned int flags)
2709 {
2710 struct set_affinity_pending my_pending = { }, *pending = NULL;
2711 bool stop_pending, complete = false;
2712
2713 /* Can the task run on the task's current CPU? If so, we're done */
2714 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
2715 struct task_struct *push_task = NULL;
2716
2717 if ((flags & SCA_MIGRATE_ENABLE) &&
2718 (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2719 rq->push_busy = true;
2720 push_task = get_task_struct(p);
2721 }
2722
2723 /*
2724 * If there are pending waiters, but no pending stop_work,
2725 * then complete now.
2726 */
2727 pending = p->migration_pending;
2728 if (pending && !pending->stop_pending) {
2729 p->migration_pending = NULL;
2730 complete = true;
2731 }
2732
2733 preempt_disable();
2734 task_rq_unlock(rq, p, rf);
2735 if (push_task) {
2736 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2737 p, &rq->push_work);
2738 }
2739 preempt_enable();
2740
2741 if (complete)
2742 complete_all(&pending->done);
2743
2744 return 0;
2745 }
2746
2747 if (!(flags & SCA_MIGRATE_ENABLE)) {
2748 /* serialized by p->pi_lock */
2749 if (!p->migration_pending) {
2750 /* Install the request */
2751 refcount_set(&my_pending.refs, 1);
2752 init_completion(&my_pending.done);
2753 my_pending.arg = (struct migration_arg) {
2754 .task = p,
2755 .dest_cpu = dest_cpu,
2756 .pending = &my_pending,
2757 };
2758
2759 p->migration_pending = &my_pending;
2760 } else {
2761 pending = p->migration_pending;
2762 refcount_inc(&pending->refs);
2763 /*
2764 * Affinity has changed, but we've already installed a
2765 * pending. migration_cpu_stop() *must* see this, else
2766 * we risk a completion of the pending despite having a
2767 * task on a disallowed CPU.
2768 *
2769 * Serialized by p->pi_lock, so this is safe.
2770 */
2771 pending->arg.dest_cpu = dest_cpu;
2772 }
2773 }
2774 pending = p->migration_pending;
2775 /*
2776 * - !MIGRATE_ENABLE:
2777 * we'll have installed a pending if there wasn't one already.
2778 *
2779 * - MIGRATE_ENABLE:
2780 * we're here because the current CPU isn't matching anymore,
2781 * the only way that can happen is because of a concurrent
2782 * set_cpus_allowed_ptr() call, which should then still be
2783 * pending completion.
2784 *
2785 * Either way, we really should have a @pending here.
2786 */
2787 if (WARN_ON_ONCE(!pending)) {
2788 task_rq_unlock(rq, p, rf);
2789 return -EINVAL;
2790 }
2791
2792 if (task_running(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
2793 /*
2794 * MIGRATE_ENABLE gets here because 'p == current', but for
2795 * anything else we cannot reliably evaluate is_migration_disabled(); punt
2796 * and have the stopper function handle it all race-free.
2797 */
2798 stop_pending = pending->stop_pending;
2799 if (!stop_pending)
2800 pending->stop_pending = true;
2801
2802 if (flags & SCA_MIGRATE_ENABLE)
2803 p->migration_flags &= ~MDF_PUSH;
2804
2805 preempt_disable();
2806 task_rq_unlock(rq, p, rf);
2807 if (!stop_pending) {
2808 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
2809 &pending->arg, &pending->stop_work);
2810 }
2811 preempt_enable();
2812
2813 if (flags & SCA_MIGRATE_ENABLE)
2814 return 0;
2815 } else {
2816
2817 if (!is_migration_disabled(p)) {
2818 if (task_on_rq_queued(p))
2819 rq = move_queued_task(rq, rf, p, dest_cpu);
2820
2821 if (!pending->stop_pending) {
2822 p->migration_pending = NULL;
2823 complete = true;
2824 }
2825 }
2826 task_rq_unlock(rq, p, rf);
2827
2828 if (complete)
2829 complete_all(&pending->done);
2830 }
2831
2832 wait_for_completion(&pending->done);
2833
2834 if (refcount_dec_and_test(&pending->refs))
2835 wake_up_var(&pending->refs); /* No UaF, just an address */
2836
2837 /*
2838 * Block the original owner of &pending until all subsequent callers
2839 * have seen the completion and decremented the refcount
2840 */
2841 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
2842
2843 /* ARGH */
2844 WARN_ON_ONCE(my_pending.stop_pending);
2845
2846 return 0;
2847 }
2848
2849 /*
2850 * Called with both p->pi_lock and rq->lock held; drops both before returning.
2851 */
2852 static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
2853 const struct cpumask *new_mask,
2854 u32 flags,
2855 struct rq *rq,
2856 struct rq_flags *rf)
2857 __releases(rq->lock)
2858 __releases(p->pi_lock)
2859 {
2860 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
2861 const struct cpumask *cpu_valid_mask = cpu_active_mask;
2862 bool kthread = p->flags & PF_KTHREAD;
2863 struct cpumask *user_mask = NULL;
2864 unsigned int dest_cpu;
2865 int ret = 0;
2866
2867 update_rq_clock(rq);
2868
2869 if (kthread || is_migration_disabled(p)) {
2870 /*
2871 * Kernel threads are allowed on online && !active CPUs,
2872 * however, during cpu-hot-unplug, even these might get pushed
2873 * away if not KTHREAD_IS_PER_CPU.
2874 *
2875 * Specifically, migration_disabled() tasks must not fail the
2876 * cpumask_any_and_distribute() pick below, esp. so on
2877 * SCA_MIGRATE_ENABLE, otherwise we'll not call
2878 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
2879 */
2880 cpu_valid_mask = cpu_online_mask;
2881 }
2882
2883 if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
2884 ret = -EINVAL;
2885 goto out;
2886 }
2887
2888 /*
2889 * Must re-check here, to close a race against __kthread_bind(),
2890 * sched_setaffinity() is not guaranteed to observe the flag.
2891 */
2892 if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
2893 ret = -EINVAL;
2894 goto out;
2895 }
2896
2897 if (!(flags & SCA_MIGRATE_ENABLE)) {
2898 if (cpumask_equal(&p->cpus_mask, new_mask))
2899 goto out;
2900
2901 if (WARN_ON_ONCE(p == current &&
2902 is_migration_disabled(p) &&
2903 !cpumask_test_cpu(task_cpu(p), new_mask))) {
2904 ret = -EBUSY;
2905 goto out;
2906 }
2907 }
2908
2909 /*
2910 * Picking a ~random cpu helps in cases where we are changing affinity
2911 * for groups of tasks (ie. cpuset), so that load balancing is not
2912 * immediately required to distribute the tasks within their new mask.
2913 */
2914 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
2915 trace_android_rvh_set_cpus_allowed_ptr_locked(cpu_valid_mask, new_mask, &dest_cpu);
2916 trace_android_rvh_set_cpus_allowed_by_task(cpu_valid_mask, new_mask, p, &dest_cpu);
2917
2918 if (dest_cpu >= nr_cpu_ids) {
2919 ret = -EINVAL;
2920 goto out;
2921 }
2922
2923 __do_set_cpus_allowed(p, new_mask, flags);
2924
2925 if (flags & SCA_USER)
2926 user_mask = clear_user_cpus_ptr(p);
2927
2928 ret = affine_move_task(rq, p, rf, dest_cpu, flags);
2929
2930 kfree(user_mask);
2931
2932 return ret;
2933
2934 out:
2935 task_rq_unlock(rq, p, rf);
2936
2937 return ret;
2938 }
2939
2940 /*
2941 * Change a given task's CPU affinity. Migrate the thread to a
2942 * proper CPU and schedule it away if the CPU it's executing on
2943 * is removed from the allowed bitmask.
2944 *
2945 * NOTE: the caller must have a valid reference to the task, the
2946 * task must not exit() & deallocate itself prematurely. The
2947 * call is not atomic; no spinlocks may be held.
2948 */
2949 static int __set_cpus_allowed_ptr(struct task_struct *p,
2950 const struct cpumask *new_mask, u32 flags)
2951 {
2952 struct rq_flags rf;
2953 struct rq *rq;
2954
2955 rq = task_rq_lock(p, &rf);
2956 return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, &rf);
2957 }
2958
2959 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
2960 {
2961 return __set_cpus_allowed_ptr(p, new_mask, 0);
2962 }
2963 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
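
/*
 * Usage sketch (illustrative): a driver pinning a worker kthread to a
 * single CPU after creation (worker_fn and the thread name are
 * hypothetical):
 *
 *	struct task_struct *tsk = kthread_run(worker_fn, NULL, "mydrv/%d", cpu);
 *
 *	if (!IS_ERR(tsk) && set_cpus_allowed_ptr(tsk, cpumask_of(cpu)))
 *		pr_warn("mydrv: could not pin worker to CPU%d\n", cpu);
 *
 * The call returns -EINVAL if the new mask intersects no active CPU, so
 * the return value is worth checking.
 */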
2964
2965 /*
2966 * Change a given task's CPU affinity to the intersection of its current
2967 * affinity mask and @subset_mask, writing the resulting mask to @new_mask
2968 * and pointing @p->user_cpus_ptr to a copy of the old mask.
2969 * If the resulting mask is empty, leave the affinity unchanged and return
2970 * -EINVAL.
2971 */
2972 static int restrict_cpus_allowed_ptr(struct task_struct *p,
2973 struct cpumask *new_mask,
2974 const struct cpumask *subset_mask)
2975 {
2976 struct cpumask *user_mask = NULL;
2977 struct rq_flags rf;
2978 struct rq *rq;
2979 int err;
2980
2981 if (!p->user_cpus_ptr) {
2982 user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
2983 if (!user_mask)
2984 return -ENOMEM;
2985 }
2986
2987 rq = task_rq_lock(p, &rf);
2988
2989 /*
2990 * Forcefully restricting the affinity of a deadline task is
2991 * likely to cause problems, so fail and noisily override the
2992 * mask entirely.
2993 */
2994 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
2995 err = -EPERM;
2996 goto err_unlock;
2997 }
2998
2999 if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
3000 err = -EINVAL;
3001 goto err_unlock;
3002 }
3003
3004 /*
3005 * We're about to butcher the task affinity, so keep track of what
3006 * the user asked for in case we're able to restore it later on.
3007 */
3008 if (user_mask) {
3009 cpumask_copy(user_mask, p->cpus_ptr);
3010 p->user_cpus_ptr = user_mask;
3011 }
3012
3013 return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf);
3014
3015 err_unlock:
3016 task_rq_unlock(rq, p, &rf);
3017 kfree(user_mask);
3018 return err;
3019 }
3020
3021 /*
3022 * Restrict the CPU affinity of task @p so that it is a subset of
3023 * task_cpu_possible_mask() and point @p->user_cpu_ptr to a copy of the
3024 * old affinity mask. If the resulting mask is empty, we warn and walk
3025 * up the cpuset hierarchy until we find a suitable mask.
3026 */
3027 void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3028 {
3029 cpumask_var_t new_mask;
3030 const struct cpumask *override_mask = task_cpu_possible_mask(p);
3031
3032 alloc_cpumask_var(&new_mask, GFP_KERNEL);
3033
3034 /*
3035 * __migrate_task() can fail silently in the face of concurrent
3036 * offlining of the chosen destination CPU, so take the hotplug
3037 * lock to ensure that the migration succeeds.
3038 */
3039 trace_android_vh_force_compatible_pre(NULL);
3040 cpus_read_lock();
3041 if (!cpumask_available(new_mask))
3042 goto out_set_mask;
3043
3044 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3045 goto out_free_mask;
3046
3047 /*
3048 * We failed to find a valid subset of the affinity mask for the
3049 * task, so override it based on its cpuset hierarchy.
3050 */
3051 cpuset_cpus_allowed(p, new_mask);
3052 override_mask = new_mask;
3053
3054 out_set_mask:
3055 if (printk_ratelimit()) {
3056 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3057 task_pid_nr(p), p->comm,
3058 cpumask_pr_args(override_mask));
3059 }
3060
3061 WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3062 out_free_mask:
3063 cpus_read_unlock();
3064 trace_android_vh_force_compatible_post(NULL);
3065 free_cpumask_var(new_mask);
3066 }
3067
3068 static int
3069 __sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
3070
3071 /*
3072 * Restore the affinity of a task @p which was previously restricted by a
3073 * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
3074 * @p->user_cpus_ptr.
3075 *
3076 * It is the caller's responsibility to serialise this with any calls to
3077 * force_compatible_cpus_allowed_ptr(@p).
3078 */
3079 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3080 {
3081 struct cpumask *user_mask = p->user_cpus_ptr;
3082 unsigned long flags;
3083
3084 /*
3085 * Try to restore the old affinity mask. If this fails, then
3086 * we free the mask explicitly to avoid it being inherited across
3087 * a subsequent fork().
3088 */
3089 if (!user_mask || !__sched_setaffinity(p, user_mask))
3090 return;
3091
3092 raw_spin_lock_irqsave(&p->pi_lock, flags);
3093 user_mask = clear_user_cpus_ptr(p);
3094 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3095
3096 kfree(user_mask);
3097 }
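
/*
 * Usage sketch (illustrative): the force/relax pair above is meant for
 * paths such as an arch exec handler on a system where some binaries
 * can only run on a subset of CPUs (e.g. 32-bit tasks on an asymmetric
 * arm64 system):
 *
 *	force_compatible_cpus_allowed_ptr(current);
 *	...
 *	relax_compatible_cpus_allowed_ptr(current);
 *
 * The first call saves the user's original mask in p->user_cpus_ptr so
 * that the second can restore it; the caller must serialize the two as
 * noted above.
 */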
3098
3099 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
3100 {
3101 #ifdef CONFIG_SCHED_DEBUG
3102 unsigned int state = READ_ONCE(p->__state);
3103
3104 /*
3105 * We should never call set_task_cpu() on a blocked task,
3106 * ttwu() will sort out the placement.
3107 */
3108 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3109
3110 /*
3111 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3112 * because schedstat_wait_{start,end} rebase migrating task's wait_start
3113 * time relying on p->on_rq.
3114 */
3115 WARN_ON_ONCE(state == TASK_RUNNING &&
3116 p->sched_class == &fair_sched_class &&
3117 (p->on_rq && !task_on_rq_migrating(p)));
3118
3119 #ifdef CONFIG_LOCKDEP
3120 /*
3121 * The caller should hold either p->pi_lock or rq->lock, when changing
3122 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3123 *
3124 * sched_move_task() holds both and thus holding either pins the cgroup,
3125 * see task_group().
3126 *
3127 * Furthermore, all task_rq users should acquire both locks, see
3128 * task_rq_lock().
3129 */
3130 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3131 lockdep_is_held(__rq_lockp(task_rq(p)))));
3132 #endif
3133 /*
3134 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3135 */
3136 WARN_ON_ONCE(!cpu_online(new_cpu));
3137
3138 WARN_ON_ONCE(is_migration_disabled(p));
3139 #endif
3140
3141 trace_sched_migrate_task(p, new_cpu);
3142
3143 if (task_cpu(p) != new_cpu) {
3144 if (p->sched_class->migrate_task_rq)
3145 p->sched_class->migrate_task_rq(p, new_cpu);
3146 p->se.nr_migrations++;
3147 rseq_migrate(p);
3148 perf_event_task_migrate(p);
3149 trace_android_rvh_set_task_cpu(p, new_cpu);
3150 }
3151
3152 __set_task_cpu(p, new_cpu);
3153 }
3154 EXPORT_SYMBOL_GPL(set_task_cpu);
3155
3156 static void __migrate_swap_task(struct task_struct *p, int cpu)
3157 {
3158 if (task_on_rq_queued(p)) {
3159 struct rq *src_rq, *dst_rq;
3160 struct rq_flags srf, drf;
3161
3162 src_rq = task_rq(p);
3163 dst_rq = cpu_rq(cpu);
3164
3165 rq_pin_lock(src_rq, &srf);
3166 rq_pin_lock(dst_rq, &drf);
3167
3168 deactivate_task(src_rq, p, 0);
3169 set_task_cpu(p, cpu);
3170 activate_task(dst_rq, p, 0);
3171 check_preempt_curr(dst_rq, p, 0);
3172
3173 rq_unpin_lock(dst_rq, &drf);
3174 rq_unpin_lock(src_rq, &srf);
3175
3176 } else {
3177 /*
3178 * Task isn't running anymore; make it appear like we migrated
3179 * it before it went to sleep. This means on wakeup we make the
3180 * previous CPU our target instead of where it really is.
3181 */
3182 p->wake_cpu = cpu;
3183 }
3184 }
3185
3186 struct migration_swap_arg {
3187 struct task_struct *src_task, *dst_task;
3188 int src_cpu, dst_cpu;
3189 };
3190
3191 static int migrate_swap_stop(void *data)
3192 {
3193 struct migration_swap_arg *arg = data;
3194 struct rq *src_rq, *dst_rq;
3195 int ret = -EAGAIN;
3196
3197 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3198 return -EAGAIN;
3199
3200 src_rq = cpu_rq(arg->src_cpu);
3201 dst_rq = cpu_rq(arg->dst_cpu);
3202
3203 double_raw_lock(&arg->src_task->pi_lock,
3204 &arg->dst_task->pi_lock);
3205 double_rq_lock(src_rq, dst_rq);
3206
3207 if (task_cpu(arg->dst_task) != arg->dst_cpu)
3208 goto unlock;
3209
3210 if (task_cpu(arg->src_task) != arg->src_cpu)
3211 goto unlock;
3212
3213 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3214 goto unlock;
3215
3216 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3217 goto unlock;
3218
3219 __migrate_swap_task(arg->src_task, arg->dst_cpu);
3220 __migrate_swap_task(arg->dst_task, arg->src_cpu);
3221
3222 ret = 0;
3223
3224 unlock:
3225 double_rq_unlock(src_rq, dst_rq);
3226 raw_spin_unlock(&arg->dst_task->pi_lock);
3227 raw_spin_unlock(&arg->src_task->pi_lock);
3228
3229 return ret;
3230 }
3231
3232 /*
3233 * Cross migrate two tasks
3234 */
3235 int migrate_swap(struct task_struct *cur, struct task_struct *p,
3236 int target_cpu, int curr_cpu)
3237 {
3238 struct migration_swap_arg arg;
3239 int ret = -EINVAL;
3240
3241 arg = (struct migration_swap_arg){
3242 .src_task = cur,
3243 .src_cpu = curr_cpu,
3244 .dst_task = p,
3245 .dst_cpu = target_cpu,
3246 };
3247
3248 if (arg.src_cpu == arg.dst_cpu)
3249 goto out;
3250
3251 /*
3252 * These three tests are all lockless; this is OK since all of them
3253 * will be re-checked with proper locks held further down the line.
3254 */
3255 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3256 goto out;
3257
3258 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3259 goto out;
3260
3261 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3262 goto out;
3263
3264 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
3265 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3266
3267 out:
3268 return ret;
3269 }
3270 EXPORT_SYMBOL_GPL(migrate_swap);
3271
3272 /*
3273 * wait_task_inactive - wait for a thread to unschedule.
3274 *
3275 * If @match_state is nonzero, it's the @p->state value just checked and
3276 * not expected to change. If it changes, i.e. @p might have woken up,
3277 * then return zero. When we succeed in waiting for @p to be off its CPU,
3278 * we return a positive number (its total switch count). If a second call
3279 * a short while later returns the same number, the caller can be sure that
3280 * @p has remained unscheduled the whole time.
3281 *
3282 * The caller must ensure that the task *will* unschedule sometime soon,
3283 * else this function might spin for a *long* time. This function can't
3284 * be called with interrupts off, or it may introduce deadlock with
3285 * smp_call_function() if an IPI is sent by the same process we are
3286 * waiting to become inactive.
3287 */
3288 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
3289 {
3290 int running, queued;
3291 struct rq_flags rf;
3292 unsigned long ncsw;
3293 struct rq *rq;
3294
3295 for (;;) {
3296 /*
3297 * We do the initial early heuristics without holding
3298 * any task-queue locks at all. We'll only try to get
3299 * the runqueue lock when things look like they will
3300 * work out!
3301 */
3302 rq = task_rq(p);
3303
3304 /*
3305 * If the task is actively running on another CPU
3306 * still, just relax and busy-wait without holding
3307 * any locks.
3308 *
3309 * NOTE! Since we don't hold any locks, it's not
3310 * even certain that "rq" remains the right runqueue!
3311 * But we don't care, since "task_running()" will
3312 * return false if the runqueue has changed and p
3313 * is actually now running somewhere else!
3314 */
3315 while (task_running(rq, p)) {
3316 if (match_state && unlikely(READ_ONCE(p->__state) != match_state))
3317 return 0;
3318 cpu_relax();
3319 }
3320
3321 /*
3322 * Ok, time to look more closely! We need the rq
3323 * lock now, to be *sure*. If we're wrong, we'll
3324 * just go back and repeat.
3325 */
3326 rq = task_rq_lock(p, &rf);
3327 trace_sched_wait_task(p);
3328 running = task_running(rq, p);
3329 queued = task_on_rq_queued(p);
3330 ncsw = 0;
3331 if (!match_state || READ_ONCE(p->__state) == match_state)
3332 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
3333 task_rq_unlock(rq, p, &rf);
3334
3335 /*
3336 * If it changed from the expected state, bail out now.
3337 */
3338 if (unlikely(!ncsw))
3339 break;
3340
3341 /*
3342 * Was it really running after all now that we
3343 * checked with the proper locks actually held?
3344 *
3345 * Oops. Go back and try again..
3346 */
3347 if (unlikely(running)) {
3348 cpu_relax();
3349 continue;
3350 }
3351
3352 /*
3353 * It's not enough that it's not actively running,
3354 * it must be off the runqueue _entirely_, and not
3355 * preempted!
3356 *
3357 * So if it was still runnable (but just not actively
3358 * running right now), it's preempted, and we should
3359 * yield - it could be a while.
3360 */
3361 if (unlikely(queued)) {
3362 ktime_t to = NSEC_PER_SEC / HZ;
3363
3364 set_current_state(TASK_UNINTERRUPTIBLE);
3365 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
3366 continue;
3367 }
3368
3369 /*
3370 * Ahh, all good. It wasn't running, and it wasn't
3371 * runnable, which means that it will never become
3372 * running in the future either. We're all done!
3373 */
3374 break;
3375 }
3376
3377 return ncsw;
3378 }
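
/*
 * Usage sketch (illustrative) of the switch-count contract documented
 * above:
 *
 *	ncsw = wait_task_inactive(p, __TASK_TRACED);
 *
 * followed a short while later by
 *
 *	if (ncsw && wait_task_inactive(p, __TASK_TRACED) == ncsw)
 *
 * tells the caller that @p stayed off its CPU between the two calls;
 * ptrace uses this kind of pattern to ensure a tracee is really stopped
 * before touching its state.
 */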
3379
3380 /***
3381 * kick_process - kick a running thread to enter/exit the kernel
3382 * @p: the to-be-kicked thread
3383 *
3384 * Cause a process which is running on another CPU to enter
3385 * kernel-mode, without any delay (e.g. to get signals handled).
3386 *
3387 * NOTE: this function doesn't have to take the runqueue lock,
3388 * because all it wants to ensure is that the remote task enters
3389 * the kernel. If the IPI races and the task has been migrated
3390 * to another CPU then no harm is done and the purpose has been
3391 * achieved as well.
3392 */
3393 void kick_process(struct task_struct *p)
3394 {
3395 int cpu;
3396
3397 preempt_disable();
3398 cpu = task_cpu(p);
3399 if ((cpu != smp_processor_id()) && task_curr(p))
3400 smp_send_reschedule(cpu);
3401 preempt_enable();
3402 }
3403 EXPORT_SYMBOL_GPL(kick_process);
3404
3405 /*
3406 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3407 *
3408 * A few notes on cpu_active vs cpu_online:
3409 *
3410 * - cpu_active must be a subset of cpu_online
3411 *
3412 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3413 * see __set_cpus_allowed_ptr(). At this point the newly online
3414 * CPU isn't yet part of the sched domains, and balancing will not
3415 * see it.
3416 *
3417 * - on CPU-down we clear cpu_active() to mask the sched domains and
3418 * prevent the load balancer from placing new tasks on the to-be-removed
3419 * CPU. Existing tasks will remain running there and will be taken
3420 * off.
3421 *
3422 * This means that fallback selection must not select !active CPUs.
3423 * And can assume that any active CPU must be online. Conversely
3424 * select_task_rq() below may allow selection of !active CPUs in order
3425 * to satisfy the above rules.
3426 */
3427 int select_fallback_rq(int cpu, struct task_struct *p)
3428 {
3429 int nid = cpu_to_node(cpu);
3430 const struct cpumask *nodemask = NULL;
3431 enum { cpuset, possible, fail } state = cpuset;
3432 int dest_cpu = -1;
3433
3434 trace_android_rvh_select_fallback_rq(cpu, p, &dest_cpu);
3435 if (dest_cpu >= 0)
3436 return dest_cpu;
3437
3438 /*
3439 * If the node that the CPU is on has been offlined, cpu_to_node()
3440 * will return -1. There is no CPU on the node, and we should
3441 * select a CPU on another node.
3442 */
3443 if (nid != -1) {
3444 nodemask = cpumask_of_node(nid);
3445
3446 /* Look for allowed, online CPU in same node. */
3447 for_each_cpu(dest_cpu, nodemask) {
3448 if (is_cpu_allowed(p, dest_cpu))
3449 return dest_cpu;
3450 }
3451 }
3452
3453 for (;;) {
3454 /* Any allowed, online CPU? */
3455 for_each_cpu(dest_cpu, p->cpus_ptr) {
3456 if (!is_cpu_allowed(p, dest_cpu))
3457 continue;
3458
3459 goto out;
3460 }
3461
3462 /* No more Mr. Nice Guy. */
3463 switch (state) {
3464 case cpuset:
3465 if (cpuset_cpus_allowed_fallback(p)) {
3466 state = possible;
3467 break;
3468 }
3469 fallthrough;
3470 case possible:
3471 /*
3472 * XXX When called from select_task_rq() we only
3473 * hold p->pi_lock and again violate locking order.
3474 *
3475 * More yuck to audit.
3476 */
3477 do_set_cpus_allowed(p, task_cpu_possible_mask(p));
3478 state = fail;
3479 break;
3480 case fail:
3481 BUG();
3482 break;
3483 }
3484 }
3485
3486 out:
3487 if (state != cpuset) {
3488 /*
3489 * Don't tell them about moving exiting tasks or
3490 * kernel threads (both mm NULL), since they never
3491 * leave the kernel.
3492 */
3493 if (p->mm && printk_ratelimit()) {
3494 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3495 task_pid_nr(p), p->comm, cpu);
3496 }
3497 }
3498
3499 return dest_cpu;
3500 }
3501 EXPORT_SYMBOL_GPL(select_fallback_rq);
3502
3503 /*
3504 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3505 */
3506 static inline
3507 int select_task_rq(struct task_struct *p, int cpu, int wake_flags)
3508 {
3509 lockdep_assert_held(&p->pi_lock);
3510
3511 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p))
3512 cpu = p->sched_class->select_task_rq(p, cpu, wake_flags);
3513 else
3514 cpu = cpumask_any(p->cpus_ptr);
3515
3516 /*
3517 * In order not to call set_task_cpu() on a blocking task we need
3518 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3519 * CPU.
3520 *
3521 * Since this is common to all placement strategies, this lives here.
3522 *
3523 * [ this allows ->select_task() to simply return task_cpu(p) and
3524 * not worry about this generic constraint ]
3525 */
3526 if (unlikely(!is_cpu_allowed(p, cpu)))
3527 cpu = select_fallback_rq(task_cpu(p), p);
3528
3529 return cpu;
3530 }
3531
3532 void sched_set_stop_task(int cpu, struct task_struct *stop)
3533 {
3534 static struct lock_class_key stop_pi_lock;
3535 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3536 struct task_struct *old_stop = cpu_rq(cpu)->stop;
3537
3538 if (stop) {
3539 /*
3540 * Make it appear like a SCHED_FIFO task, it's something
3541 * userspace knows about and won't get confused about.
3542 *
3543 * Also, it will make PI more or less work without too
3544 * much confusion -- but then, stop work should not
3545 * rely on PI working anyway.
3546 */
3547 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
3548
3549 stop->sched_class = &stop_sched_class;
3550
3551 /*
3552 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3553 * adjust the effective priority of a task. As a result,
3554 * rt_mutex_setprio() can trigger (RT) balancing operations,
3555 * which can then trigger wakeups of the stop thread to push
3556 * around the current task.
3557 *
3558 * The stop task itself will never be part of the PI-chain, it
3559 * never blocks, therefore that ->pi_lock recursion is safe.
3560 * Tell lockdep about this by placing the stop->pi_lock in its
3561 * own class.
3562 */
3563 lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3564 }
3565
3566 cpu_rq(cpu)->stop = stop;
3567
3568 if (old_stop) {
3569 /*
3570 * Reset it back to a normal scheduling class so that
3571 * it can die in pieces.
3572 */
3573 old_stop->sched_class = &rt_sched_class;
3574 }
3575 }
3576
3577 #else /* CONFIG_SMP */
3578
3579 static inline int __set_cpus_allowed_ptr(struct task_struct *p,
3580 const struct cpumask *new_mask,
3581 u32 flags)
3582 {
3583 return set_cpus_allowed_ptr(p, new_mask);
3584 }
3585
3586 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
3587
3588 static inline bool rq_has_pinned_tasks(struct rq *rq)
3589 {
3590 return false;
3591 }
3592
3593 #endif /* !CONFIG_SMP */
3594
3595 static void
3596 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3597 {
3598 struct rq *rq;
3599
3600 if (!schedstat_enabled())
3601 return;
3602
3603 rq = this_rq();
3604
3605 #ifdef CONFIG_SMP
3606 if (cpu == rq->cpu) {
3607 __schedstat_inc(rq->ttwu_local);
3608 __schedstat_inc(p->se.statistics.nr_wakeups_local);
3609 } else {
3610 struct sched_domain *sd;
3611
3612 __schedstat_inc(p->se.statistics.nr_wakeups_remote);
3613 rcu_read_lock();
3614 for_each_domain(rq->cpu, sd) {
3615 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3616 __schedstat_inc(sd->ttwu_wake_remote);
3617 break;
3618 }
3619 }
3620 rcu_read_unlock();
3621 }
3622
3623 if (wake_flags & WF_MIGRATED)
3624 __schedstat_inc(p->se.statistics.nr_wakeups_migrate);
3625 #endif /* CONFIG_SMP */
3626
3627 __schedstat_inc(rq->ttwu_count);
3628 __schedstat_inc(p->se.statistics.nr_wakeups);
3629
3630 if (wake_flags & WF_SYNC)
3631 __schedstat_inc(p->se.statistics.nr_wakeups_sync);
3632 }
3633
3634 /*
3635 * Mark the task runnable and perform wakeup-preemption.
3636 */
3637 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
3638 struct rq_flags *rf)
3639 {
3640 check_preempt_curr(rq, p, wake_flags);
3641 WRITE_ONCE(p->__state, TASK_RUNNING);
3642 trace_sched_wakeup(p);
3643
3644 #ifdef CONFIG_SMP
3645 if (p->sched_class->task_woken) {
3646 /*
3647 * Our task @p is fully woken up and running; so it's safe to
3648 * drop the rq->lock, hereafter rq is only used for statistics.
3649 */
3650 rq_unpin_lock(rq, rf);
3651 p->sched_class->task_woken(rq, p);
3652 rq_repin_lock(rq, rf);
3653 }
3654
3655 if (rq->idle_stamp) {
3656 u64 delta = rq_clock(rq) - rq->idle_stamp;
3657 u64 max = 2*rq->max_idle_balance_cost;
3658
3659 update_avg(&rq->avg_idle, delta);
3660
3661 if (rq->avg_idle > max)
3662 rq->avg_idle = max;
3663
3664 rq->wake_stamp = jiffies;
3665 rq->wake_avg_idle = rq->avg_idle / 2;
3666
3667 rq->idle_stamp = 0;
3668 }
3669 #endif
3670 }
3671
3672 static void
3673 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3674 struct rq_flags *rf)
3675 {
3676 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3677
3678 if (wake_flags & WF_SYNC)
3679 en_flags |= ENQUEUE_WAKEUP_SYNC;
3680
3681 lockdep_assert_rq_held(rq);
3682
3683 if (p->sched_contributes_to_load)
3684 rq->nr_uninterruptible--;
3685
3686 #ifdef CONFIG_SMP
3687 if (wake_flags & WF_MIGRATED)
3688 en_flags |= ENQUEUE_MIGRATED;
3689 else
3690 #endif
3691 if (p->in_iowait) {
3692 delayacct_blkio_end(p);
3693 atomic_dec(&task_rq(p)->nr_iowait);
3694 }
3695
3696 activate_task(rq, p, en_flags);
3697 ttwu_do_wakeup(rq, p, wake_flags, rf);
3698 }
3699
3700 /*
3701 * Consider @p being inside a wait loop:
3702 *
3703 * for (;;) {
3704 * set_current_state(TASK_UNINTERRUPTIBLE);
3705 *
3706 * if (CONDITION)
3707 * break;
3708 *
3709 * schedule();
3710 * }
3711 * __set_current_state(TASK_RUNNING);
3712 *
3713 * between set_current_state() and schedule(). In this case @p is still
3714 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3715 * an atomic manner.
3716 *
3717 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3718 * then schedule() must still happen and p->state can be changed to
3719 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3720 * need to do a full wakeup with enqueue.
3721 *
3722 * Returns: %true when the wakeup is done,
3723 * %false otherwise.
3724 */
3725 static int ttwu_runnable(struct task_struct *p, int wake_flags)
3726 {
3727 struct rq_flags rf;
3728 struct rq *rq;
3729 int ret = 0;
3730
3731 rq = __task_rq_lock(p, &rf);
3732 if (task_on_rq_queued(p)) {
3733 /* check_preempt_curr() may use rq clock */
3734 update_rq_clock(rq);
3735 ttwu_do_wakeup(rq, p, wake_flags, &rf);
3736 ret = 1;
3737 }
3738 __task_rq_unlock(rq, &rf);
3739
3740 return ret;
3741 }
3742
3743 #ifdef CONFIG_SMP
3744 void sched_ttwu_pending(void *arg)
3745 {
3746 struct llist_node *llist = arg;
3747 struct rq *rq = this_rq();
3748 struct task_struct *p, *t;
3749 struct rq_flags rf;
3750
3751 if (!llist)
3752 return;
3753
3754 /*
3755 * rq::ttwu_pending is a racy indication of outstanding wakeups.
3756 * Races are such that false-negatives are possible, since they
3757 * are shorter lived than false-positives would be.
3758 */
3759 WRITE_ONCE(rq->ttwu_pending, 0);
3760
3761 rq_lock_irqsave(rq, &rf);
3762 update_rq_clock(rq);
3763
3764 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3765 if (WARN_ON_ONCE(p->on_cpu))
3766 smp_cond_load_acquire(&p->on_cpu, !VAL);
3767
3768 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3769 set_task_cpu(p, cpu_of(rq));
3770
3771 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3772 }
3773
3774 rq_unlock_irqrestore(rq, &rf);
3775 }
3776
3777 void send_call_function_single_ipi(int cpu)
3778 {
3779 struct rq *rq = cpu_rq(cpu);
3780
3781 if (!set_nr_if_polling(rq->idle))
3782 arch_send_call_function_single_ipi(cpu);
3783 else
3784 trace_sched_wake_idle_without_ipi(cpu);
3785 }
3786
3787 /*
3788 * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
3789 * necessary. The wakee CPU, on receipt of the IPI, will queue the task
3790 * via sched_ttwu_pending() for activation so the wakee incurs the cost
3791 * of the wakeup instead of the waker.
3792 */
3793 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3794 {
3795 struct rq *rq = cpu_rq(cpu);
3796
3797 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3798
3799 WRITE_ONCE(rq->ttwu_pending, 1);
3800 __smp_call_single_queue(cpu, &p->wake_entry.llist);
3801 }
3802
3803 void wake_up_if_idle(int cpu)
3804 {
3805 struct rq *rq = cpu_rq(cpu);
3806 struct rq_flags rf;
3807
3808 rcu_read_lock();
3809
3810 if (!is_idle_task(rcu_dereference(rq->curr)))
3811 goto out;
3812
3813 if (set_nr_if_polling(rq->idle)) {
3814 trace_sched_wake_idle_without_ipi(cpu);
3815 } else {
3816 rq_lock_irqsave(rq, &rf);
3817 if (is_idle_task(rq->curr))
3818 smp_send_reschedule(cpu);
3819 /* Else CPU is not idle, do nothing here: */
3820 rq_unlock_irqrestore(rq, &rf);
3821 }
3822
3823 out:
3824 rcu_read_unlock();
3825 }
3826 EXPORT_SYMBOL_GPL(wake_up_if_idle);
3827
3828 bool cpus_share_cache(int this_cpu, int that_cpu)
3829 {
3830 if (this_cpu == that_cpu)
3831 return true;
3832
3833 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3834 }
3835
3836 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3837 {
3838 /*
3839 * Do not complicate things with the async wake_list while the CPU is
3840 * in hotplug state.
3841 */
3842 if (!cpu_active(cpu))
3843 return false;
3844
3845 /* Ensure the task will still be allowed to run on the CPU. */
3846 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3847 return false;
3848
3849 /*
3850 * If the CPU does not share cache, then queue the task on the
3851 * remote rq's wakelist to avoid accessing remote data.
3852 */
3853 if (!cpus_share_cache(smp_processor_id(), cpu))
3854 return true;
3855
3856 if (cpu == smp_processor_id())
3857 return false;
3858
3859 /*
3860 * If the wakee CPU is idle, or the task is descheduling and the
3861 * only running task on the CPU, then use the wakelist to offload
3862 * the task activation to the idle (or soon-to-be-idle) CPU as
3863 * the current CPU is likely busy. nr_running is checked to
3864 * avoid unnecessary task stacking.
3865 *
3866 * Note that we can only get here with (wakee) p->on_rq=0,
3867 * p->on_cpu can be whatever, we've done the dequeue, so
3868 * the wakee has been accounted out of ->nr_running.
3869 */
3870 if (!cpu_rq(cpu)->nr_running)
3871 return true;
3872
3873 return false;
3874 }
3875
3876 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3877 {
3878 bool cond = false;
3879
3880 trace_android_rvh_ttwu_cond(cpu, &cond);
3881
3882 if ((sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) || cond) {
3883 sched_clock_cpu(cpu); /* Sync clocks across CPUs */
3884 __ttwu_queue_wakelist(p, cpu, wake_flags);
3885 return true;
3886 }
3887
3888 return false;
3889 }
3890
3891 #else /* !CONFIG_SMP */
3892
3893 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3894 {
3895 return false;
3896 }
3897
3898 #endif /* CONFIG_SMP */
3899
3900 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
3901 {
3902 struct rq *rq = cpu_rq(cpu);
3903 struct rq_flags rf;
3904
3905 if (ttwu_queue_wakelist(p, cpu, wake_flags))
3906 return;
3907
3908 rq_lock(rq, &rf);
3909 update_rq_clock(rq);
3910 ttwu_do_activate(rq, p, wake_flags, &rf);
3911 rq_unlock(rq, &rf);
3912 }
3913
3914 /*
3915 * Invoked from try_to_wake_up() to check whether the task can be woken up.
3916 *
3917 * The caller holds p::pi_lock if p != current or has preemption
3918 * disabled when p == current.
3919 *
3920 * The rules of PREEMPT_RT saved_state:
3921 *
3922 * The related locking code always holds p::pi_lock when updating
3923 * p::saved_state, which means the code is fully serialized in both cases.
3924 *
3925 * The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other
3926 * bits are set. This makes it possible to distinguish all wakeup scenarios.
3927 */
3928 static __always_inline
3929 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
3930 {
3931 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
3932 WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
3933 state != TASK_RTLOCK_WAIT);
3934 }
3935
3936 if (READ_ONCE(p->__state) & state) {
3937 *success = 1;
3938 return true;
3939 }
3940
3941 #ifdef CONFIG_PREEMPT_RT
3942 /*
3943 * Saved state preserves the task state across blocking on
3944 * an RT lock. If the state matches, set p::saved_state to
3945 * TASK_RUNNING, but do not wake the task because it waits
3946 * for a lock wakeup. Also indicate success because from
3947 * the regular waker's point of view this has succeeded.
3948 *
3949 * After acquiring the lock the task will restore p::__state
3950 * from p::saved_state which ensures that the regular
3951 * wakeup is not lost. The restore will also set
3952 * p::saved_state to TASK_RUNNING so any further tests will
3953 * not result in false positives vs. @success
3954 */
3955 if (p->saved_state & state) {
3956 p->saved_state = TASK_RUNNING;
3957 *success = 1;
3958 }
3959 #endif
3960 return false;
3961 }
3962
3963 /*
3964 * Notes on Program-Order guarantees on SMP systems.
3965 *
3966 * MIGRATION
3967 *
3968 * The basic program-order guarantee on SMP systems is that when a task [t]
3969 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
3970 * execution on its new CPU [c1].
3971 *
3972 * For migration (of runnable tasks) this is provided by the following means:
3973 *
3974 * A) UNLOCK of the rq(c0)->lock scheduling out task t
3975 * B) migration for t is required to synchronize *both* rq(c0)->lock and
3976 * rq(c1)->lock (if not at the same time, then in that order).
3977 * C) LOCK of the rq(c1)->lock scheduling in task
3978 *
3979 * Release/acquire chaining guarantees that B happens after A and C after B.
3980 * Note: the CPU doing B need not be c0 or c1
3981 *
3982 * Example:
3983 *
3984 * CPU0 CPU1 CPU2
3985 *
3986 * LOCK rq(0)->lock
3987 * sched-out X
3988 * sched-in Y
3989 * UNLOCK rq(0)->lock
3990 *
3991 * LOCK rq(0)->lock // orders against CPU0
3992 * dequeue X
3993 * UNLOCK rq(0)->lock
3994 *
3995 * LOCK rq(1)->lock
3996 * enqueue X
3997 * UNLOCK rq(1)->lock
3998 *
3999 * LOCK rq(1)->lock // orders against CPU2
4000 * sched-out Z
4001 * sched-in X
4002 * UNLOCK rq(1)->lock
4003 *
4004 *
4005 * BLOCKING -- aka. SLEEP + WAKEUP
4006 *
4007 * For blocking we (obviously) need to provide the same guarantee as for
4008 * migration. However the means are completely different as there is no lock
4009 * chain to provide order. Instead we do:
4010 *
4011 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4012 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4013 *
4014 * Example:
4015 *
4016 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
4017 *
4018 * LOCK rq(0)->lock LOCK X->pi_lock
4019 * dequeue X
4020 * sched-out X
4021 * smp_store_release(X->on_cpu, 0);
4022 *
4023 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4024 * X->state = WAKING
4025 * set_task_cpu(X,2)
4026 *
4027 * LOCK rq(2)->lock
4028 * enqueue X
4029 * X->state = RUNNING
4030 * UNLOCK rq(2)->lock
4031 *
4032 * LOCK rq(2)->lock // orders against CPU1
4033 * sched-out Z
4034 * sched-in X
4035 * UNLOCK rq(2)->lock
4036 *
4037 * UNLOCK X->pi_lock
4038 * UNLOCK rq(0)->lock
4039 *
4040 *
4041 * However, for wakeups there is a second guarantee we must provide, namely we
4042 * must ensure that CONDITION=1 done by the caller can not be reordered with
4043 * accesses to the task state; see try_to_wake_up() and set_current_state().
4044 */
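/*
 * A minimal model of the blocking guarantee above; illustrative only
 * and compiled out. 'model_on_cpu' stands in for X->on_cpu and
 * 'model_data' for any store done by the task on its old CPU:
 */
#if 0
static int model_on_cpu = 1;
static int model_data;

static void model_finish_task(void)		/* CPU0, schedule() */
{
	model_data = 1;				/* all prior stores ... */
	smp_store_release(&model_on_cpu, 0);	/* ... before this release */
}

static void model_try_to_wake_up(void)		/* CPU1, the waker */
{
	/* Spin until the release above is observed, with ACQUIRE order: */
	smp_cond_load_acquire(&model_on_cpu, !VAL);
	WARN_ON(model_data != 1);		/* guaranteed to see the store */
}
#endif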
4045
4046 /**
4047 * try_to_wake_up - wake up a thread
4048 * @p: the thread to be awakened
4049 * @state: the mask of task states that can be woken
4050 * @wake_flags: wake modifier flags (WF_*)
4051 *
4052 * Conceptually does:
4053 *
4054 * If (@state & @p->state) @p->state = TASK_RUNNING.
4055 *
4056 * If the task was not queued/runnable, also place it back on a runqueue.
4057 *
4058 * This function is atomic against schedule() which would dequeue the task.
4059 *
4060 * It issues a full memory barrier before accessing @p->state, see the comment
4061 * with set_current_state().
4062 *
4063 * Uses p->pi_lock to serialize against concurrent wake-ups.
4064 *
4065 * Relies on p->pi_lock stabilizing:
4066 * - p->sched_class
4067 * - p->cpus_ptr
4068 * - p->sched_task_group
4069 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4070 *
4071 * Tries really hard to only take one task_rq(p)->lock for performance.
4072 * Takes rq->lock in:
4073 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4074 * - ttwu_queue() -- new rq, for enqueue of the task;
4075 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4076 *
4077 * As a consequence we race really badly with just about everything. See the
4078 * many memory barriers and their comments for details.
4079 *
4080 * Return: %true if @p->state changes (an actual wakeup was done),
4081 * %false otherwise.
4082 */
4083 static int
4084 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4085 {
4086 unsigned long flags;
4087 int cpu, success = 0;
4088
4089 preempt_disable();
4090 if (p == current) {
4091 /*
4092 * We're waking current, which means 'p->on_rq' and 'task_cpu(p)
4093 * == smp_processor_id()'. Together this means we can special
4094 * case the whole 'p->on_rq && ttwu_runnable()' case below
4095 * without taking any locks.
4096 *
4097 * In particular:
4098 * - we rely on Program-Order guarantees for all the ordering,
4099 * - we're serialized against set_special_state() by virtue of
4100 * it disabling IRQs (this allows not taking ->pi_lock).
4101 */
4102 if (!ttwu_state_match(p, state, &success))
4103 goto out;
4104
4105 trace_sched_waking(p);
4106 WRITE_ONCE(p->__state, TASK_RUNNING);
4107 trace_sched_wakeup(p);
4108 goto out;
4109 }
4110
4111 /*
4112 * If we are going to wake up a thread waiting for CONDITION we
4113 * need to ensure that CONDITION=1 done by the caller can not be
4114 * reordered with p->state check below. This pairs with smp_store_mb()
4115 * in set_current_state() that the waiting thread does.
4116 */
4117 raw_spin_lock_irqsave(&p->pi_lock, flags);
4118 smp_mb__after_spinlock();
4119 if (!ttwu_state_match(p, state, &success))
4120 goto unlock;
4121
4122 #ifdef CONFIG_FREEZER
4123 /*
4124 * If we're going to wake up a thread which may be frozen, then
4125 * we can only do so if we have an active CPU which is capable of
4126 * running it. This may not be the case when resuming from suspend,
4127 * as the secondary CPUs may not yet be back online. See __thaw_task()
4128 * for the actual wakeup.
4129 */
4130 if (unlikely(frozen_or_skipped(p)) &&
4131 !cpumask_intersects(cpu_active_mask, task_cpu_possible_mask(p)))
4132 goto unlock;
4133 #endif
4134
4135 trace_sched_waking(p);
4136
4137 /*
4138 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4139 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4140 * in smp_cond_load_acquire() below.
4141 *
4142 * sched_ttwu_pending() try_to_wake_up()
4143 * STORE p->on_rq = 1 LOAD p->state
4144 * UNLOCK rq->lock
4145 *
4146 * __schedule() (switch to task 'p')
4147 * LOCK rq->lock smp_rmb();
4148 * smp_mb__after_spinlock();
4149 * UNLOCK rq->lock
4150 *
4151 * [task p]
4152 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
4153 *
4154 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4155 * __schedule(). See the comment for smp_mb__after_spinlock().
4156 *
4157 * A similar smp_rmb() lives in try_invoke_on_locked_down_task().
4158 */
4159 smp_rmb();
4160 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4161 goto unlock;
4162
4163 if (READ_ONCE(p->__state) & TASK_UNINTERRUPTIBLE)
4164 trace_sched_blocked_reason(p);
4165
4166 #ifdef CONFIG_SMP
4167 /*
4168 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4169 * possible to, falsely, observe p->on_cpu == 0.
4170 *
4171 * One must be running (->on_cpu == 1) in order to remove oneself
4172 * from the runqueue.
4173 *
4174 * __schedule() (switch to task 'p') try_to_wake_up()
4175 * STORE p->on_cpu = 1 LOAD p->on_rq
4176 * UNLOCK rq->lock
4177 *
4178 * __schedule() (put 'p' to sleep)
4179 * LOCK rq->lock smp_rmb();
4180 * smp_mb__after_spinlock();
4181 * STORE p->on_rq = 0 LOAD p->on_cpu
4182 *
4183 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4184 * __schedule(). See the comment for smp_mb__after_spinlock().
4185 *
4186 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4187 * schedule()'s deactivate_task() has 'happened' and p will no longer
4188 * care about its own p->state. See the comment in __schedule().
4189 */
4190 smp_acquire__after_ctrl_dep();
4191
4192 /*
4193 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4194 * == 0), which means we need to do an enqueue, change p->state to
4195 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4196 * enqueue, such as ttwu_queue_wakelist().
4197 */
4198 WRITE_ONCE(p->__state, TASK_WAKING);
4199
4200 /*
4201 * If the owning (remote) CPU is still in the middle of schedule() with
4202 * this task as prev, consider queueing p on the remote CPU's wake_list
4203 * which potentially sends an IPI instead of spinning on p->on_cpu to
4204 * let the waker make forward progress. This is safe because IRQs are
4205 * disabled and the IPI will deliver after on_cpu is cleared.
4206 *
4207 * Ensure we load task_cpu(p) after p->on_cpu:
4208 *
4209 * set_task_cpu(p, cpu);
4210 * STORE p->cpu = @cpu
4211 * __schedule() (switch to task 'p')
4212 * LOCK rq->lock
4213 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
4214 * STORE p->on_cpu = 1 LOAD p->cpu
4215 *
4216 * to ensure we observe the correct CPU on which the task is currently
4217 * scheduling.
4218 */
4219 if (smp_load_acquire(&p->on_cpu) &&
4220 ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4221 goto unlock;
4222
4223 /*
4224 * If the owning (remote) CPU is still in the middle of schedule() with
4225 * this task as prev, wait until it's done referencing the task.
4226 *
4227 * Pairs with the smp_store_release() in finish_task().
4228 *
4229 * This ensures that tasks getting woken will be fully ordered against
4230 * their previous state and preserve Program Order.
4231 */
4232 smp_cond_load_acquire(&p->on_cpu, !VAL);
4233
4234 trace_android_rvh_try_to_wake_up(p);
4235
4236 cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
4237 if (task_cpu(p) != cpu) {
4238 if (p->in_iowait) {
4239 delayacct_blkio_end(p);
4240 atomic_dec(&task_rq(p)->nr_iowait);
4241 }
4242
4243 wake_flags |= WF_MIGRATED;
4244 psi_ttwu_dequeue(p);
4245 set_task_cpu(p, cpu);
4246 }
4247 #else
4248 cpu = task_cpu(p);
4249 #endif /* CONFIG_SMP */
4250
4251 ttwu_queue(p, cpu, wake_flags);
4252 unlock:
4253 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4254 out:
4255 if (success) {
4256 trace_android_rvh_try_to_wake_up_success(p);
4257 ttwu_stat(p, task_cpu(p), wake_flags);
4258 }
4259 preempt_enable();
4260
4261 return success;
4262 }
4263
4264 /**
4265 * try_invoke_on_locked_down_task - Invoke a function on task in fixed state
4266 * @p: Process for which the function is to be invoked, can be @current.
4267 * @func: Function to invoke.
4268 * @arg: Argument to function.
4269 *
4270 * If the specified task can be quickly locked into a definite state
4271 * (either sleeping or on a given runqueue), arrange to keep it in that
4272 * state while invoking @func(@arg). This function can use ->on_rq and
4273 * task_curr() to work out what the state is, if required. Given that
4274 * @func can be invoked with a runqueue lock held, it had better be quite
4275 * lightweight.
4276 *
4277 * Returns:
4278 * @false if the task slipped out from under the locks.
4279 * @true if the task was locked onto a runqueue or is sleeping.
4280 * However, @func can override this by returning @false.
4281 */
4282 bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg)
4283 {
4284 struct rq_flags rf;
4285 bool ret = false;
4286 struct rq *rq;
4287
4288 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4289 if (p->on_rq) {
4290 rq = __task_rq_lock(p, &rf);
4291 if (task_rq(p) == rq)
4292 ret = func(p, arg);
4293 rq_unlock(rq, &rf);
4294 } else {
4295 switch (READ_ONCE(p->__state)) {
4296 case TASK_RUNNING:
4297 case TASK_WAKING:
4298 break;
4299 default:
4300 smp_rmb(); // See smp_rmb() comment in try_to_wake_up().
4301 if (!p->on_rq)
4302 ret = func(p, arg);
4303 }
4304 }
4305 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4306 return ret;
4307 }
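/*
 * Illustrative use; the callback and caller below are hypothetical,
 * not kernel code proper. @func runs with @p pinned in a stable state,
 * so it may inspect scheduler state but must stay lightweight:
 */
#if 0
static bool sample_get_cpu(struct task_struct *t, void *arg)
{
	*(int *)arg = task_cpu(t);	/* stable while locked down */
	return true;
}

static void sample_report(struct task_struct *p)
{
	int cpu;

	if (try_invoke_on_locked_down_task(p, sample_get_cpu, &cpu))
		pr_info("%s/%d last ran on CPU%d\n", p->comm, p->pid, cpu);
}
#endif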
4308
4309 /**
4310 * wake_up_process - Wake up a specific process
4311 * @p: The process to be woken up.
4312 *
4313 * Attempt to wake up the nominated process and move it to the set of runnable
4314 * processes.
4315 *
4316 * Return: 1 if the process was woken up, 0 if it was already running.
4317 *
4318 * This function executes a full memory barrier before accessing the task state.
4319 */
4320 int wake_up_process(struct task_struct *p)
4321 {
4322 return try_to_wake_up(p, TASK_NORMAL, 0);
4323 }
4324 EXPORT_SYMBOL(wake_up_process);
4325
4326 int wake_up_state(struct task_struct *p, unsigned int state)
4327 {
4328 return try_to_wake_up(p, state, 0);
4329 }
4330 EXPORT_SYMBOL(wake_up_state);
4331
4332 /*
4333 * Perform scheduler related setup for a newly forked process p.
4334 * p is forked by current.
4335 *
4336 * __sched_fork() is basic setup used by init_idle() too:
4337 */
4338 static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
4339 {
4340 p->on_rq = 0;
4341
4342 p->se.on_rq = 0;
4343 p->se.exec_start = 0;
4344 p->se.sum_exec_runtime = 0;
4345 p->se.prev_sum_exec_runtime = 0;
4346 p->se.nr_migrations = 0;
4347 p->se.vruntime = 0;
4348 INIT_LIST_HEAD(&p->se.group_node);
4349
4350 #ifdef CONFIG_FAIR_GROUP_SCHED
4351 p->se.cfs_rq = NULL;
4352 #endif
4353
4354 trace_android_rvh_sched_fork_init(p);
4355
4356 #ifdef CONFIG_SCHEDSTATS
4357 /* Even if schedstat is disabled, there should not be garbage */
4358 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
4359 #endif
4360
4361 RB_CLEAR_NODE(&p->dl.rb_node);
4362 init_dl_task_timer(&p->dl);
4363 init_dl_inactive_task_timer(&p->dl);
4364 __dl_clear_params(p);
4365
4366 INIT_LIST_HEAD(&p->rt.run_list);
4367 p->rt.timeout = 0;
4368 p->rt.time_slice = sched_rr_timeslice;
4369 p->rt.on_rq = 0;
4370 p->rt.on_list = 0;
4371
4372 #ifdef CONFIG_PREEMPT_NOTIFIERS
4373 INIT_HLIST_HEAD(&p->preempt_notifiers);
4374 #endif
4375
4376 #ifdef CONFIG_COMPACTION
4377 p->capture_control = NULL;
4378 #endif
4379 init_numa_balancing(clone_flags, p);
4380 #ifdef CONFIG_SMP
4381 p->wake_entry.u_flags = CSD_TYPE_TTWU;
4382 p->migration_pending = NULL;
4383 #endif
4384 }
4385
4386 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4387
4388 #ifdef CONFIG_NUMA_BALANCING
4389
4390 void set_numabalancing_state(bool enabled)
4391 {
4392 if (enabled)
4393 static_branch_enable(&sched_numa_balancing);
4394 else
4395 static_branch_disable(&sched_numa_balancing);
4396 }
4397
4398 #ifdef CONFIG_PROC_SYSCTL
4399 int sysctl_numa_balancing(struct ctl_table *table, int write,
4400 void *buffer, size_t *lenp, loff_t *ppos)
4401 {
4402 struct ctl_table t;
4403 int err;
4404 int state = static_branch_likely(&sched_numa_balancing);
4405
4406 if (write && !capable(CAP_SYS_ADMIN))
4407 return -EPERM;
4408
4409 t = *table;
4410 t.data = &state;
4411 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4412 if (err < 0)
4413 return err;
4414 if (write)
4415 set_numabalancing_state(state);
4416 return err;
4417 }
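/*
 * E.g. "sysctl kernel.numa_balancing=1" (i.e. writing to
 * /proc/sys/kernel/numa_balancing) ends up here and flips the
 * sched_numa_balancing static branch accordingly.
 */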
4418 #endif
4419 #endif
4420
4421 #ifdef CONFIG_SCHEDSTATS
4422
4423 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4424
4425 static void set_schedstats(bool enabled)
4426 {
4427 if (enabled)
4428 static_branch_enable(&sched_schedstats);
4429 else
4430 static_branch_disable(&sched_schedstats);
4431 }
4432
4433 void force_schedstat_enabled(void)
4434 {
4435 if (!schedstat_enabled()) {
4436 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4437 static_branch_enable(&sched_schedstats);
4438 }
4439 }
4440
4441 static int __init setup_schedstats(char *str)
4442 {
4443 int ret = 0;
4444 if (!str)
4445 goto out;
4446
4447 if (!strcmp(str, "enable")) {
4448 set_schedstats(true);
4449 ret = 1;
4450 } else if (!strcmp(str, "disable")) {
4451 set_schedstats(false);
4452 ret = 1;
4453 }
4454 out:
4455 if (!ret)
4456 pr_warn("Unable to parse schedstats=\n");
4457
4458 return ret;
4459 }
4460 __setup("schedstats=", setup_schedstats);
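/*
 * E.g. booting with "schedstats=enable" on the kernel command line
 * turns the static branch on before userspace starts; the sysctl
 * handler below can flip it again at runtime.
 */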
4461
4462 #ifdef CONFIG_PROC_SYSCTL
4463 int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
4464 size_t *lenp, loff_t *ppos)
4465 {
4466 struct ctl_table t;
4467 int err;
4468 int state = static_branch_likely(&sched_schedstats);
4469
4470 if (write && !capable(CAP_SYS_ADMIN))
4471 return -EPERM;
4472
4473 t = *table;
4474 t.data = &state;
4475 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4476 if (err < 0)
4477 return err;
4478 if (write)
4479 set_schedstats(state);
4480 return err;
4481 }
4482 #endif /* CONFIG_PROC_SYSCTL */
4483 #endif /* CONFIG_SCHEDSTATS */
4484
4485 /*
4486 * fork()/clone()-time setup:
4487 */
4488 int sched_fork(unsigned long clone_flags, struct task_struct *p)
4489 {
4490 trace_android_rvh_sched_fork(p);
4491
4492 __sched_fork(clone_flags, p);
4493 /*
4494 * We mark the process as NEW here. This guarantees that
4495 * nobody will actually run it, and a signal or other external
4496 * event cannot wake it up and insert it on the runqueue either.
4497 */
4498 p->__state = TASK_NEW;
4499
4500 /*
4501 * Make sure we do not leak PI boosting priority to the child.
4502 */
4503 p->prio = current->normal_prio;
4504 trace_android_rvh_prepare_prio_fork(p);
4505
4506 uclamp_fork(p);
4507
4508 /*
4509 * Revert to default priority/policy on fork if requested.
4510 */
4511 if (unlikely(p->sched_reset_on_fork)) {
4512 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4513 p->policy = SCHED_NORMAL;
4514 p->static_prio = NICE_TO_PRIO(0);
4515 p->rt_priority = 0;
4516 } else if (PRIO_TO_NICE(p->static_prio) < 0)
4517 p->static_prio = NICE_TO_PRIO(0);
4518
4519 p->prio = p->normal_prio = p->static_prio;
4520 set_load_weight(p, false);
4521
4522 /*
4523 * We don't need the reset flag anymore after the fork. It has
4524 * fulfilled its duty:
4525 */
4526 p->sched_reset_on_fork = 0;
4527 }
4528
4529 if (dl_prio(p->prio))
4530 return -EAGAIN;
4531 else if (rt_prio(p->prio))
4532 p->sched_class = &rt_sched_class;
4533 else
4534 p->sched_class = &fair_sched_class;
4535
4536 init_entity_runnable_average(&p->se);
4537 trace_android_rvh_finish_prio_fork(p);
4538
4539
4540
4541 #ifdef CONFIG_SCHED_INFO
4542 if (likely(sched_info_on()))
4543 memset(&p->sched_info, 0, sizeof(p->sched_info));
4544 #endif
4545 #if defined(CONFIG_SMP)
4546 p->on_cpu = 0;
4547 #endif
4548 init_task_preempt_count(p);
4549 #ifdef CONFIG_SMP
4550 plist_node_init(&p->pushable_tasks, MAX_PRIO);
4551 RB_CLEAR_NODE(&p->pushable_dl_tasks);
4552 #endif
4553 return 0;
4554 }
4555
4556 void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4557 {
4558 unsigned long flags;
4559
4560 /*
4561 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4562 * required yet, but lockdep gets upset if rules are violated.
4563 */
4564 raw_spin_lock_irqsave(&p->pi_lock, flags);
4565 #ifdef CONFIG_CGROUP_SCHED
4566 if (1) {
4567 struct task_group *tg;
4568 tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4569 struct task_group, css);
4570 tg = autogroup_task_group(p, tg);
4571 p->sched_task_group = tg;
4572 }
4573 #endif
4574 rseq_migrate(p);
4575 /*
4576 * We're setting the CPU for the first time, we don't migrate,
4577 * so use __set_task_cpu().
4578 */
4579 __set_task_cpu(p, smp_processor_id());
4580 if (p->sched_class->task_fork)
4581 p->sched_class->task_fork(p);
4582 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4583 }
4584
4585 void sched_post_fork(struct task_struct *p)
4586 {
4587 uclamp_post_fork(p);
4588 }
4589
4590 unsigned long to_ratio(u64 period, u64 runtime)
4591 {
4592 if (runtime == RUNTIME_INF)
4593 return BW_UNIT;
4594
4595 /*
4596 * Doing this here saves a lot of checks in all
4597 * the calling paths, and returning zero seems
4598 * safe for them anyway.
4599 */
4600 if (period == 0)
4601 return 0;
4602
4603 return div64_u64(runtime << BW_SHIFT, period);
4604 }
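/*
 * Worked example: with BW_SHIFT = 20 (so BW_UNIT = 1 << 20), a runtime
 * of 500000 against a period of 1000000 (e.g. 0.5s of every second,
 * with microsecond arguments) gives:
 *
 *	to_ratio(1000000, 500000) = (500000 << 20) / 1000000 = BW_UNIT / 2
 *
 * i.e. 50% bandwidth, kept with ~20 bits of fractional precision.
 */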
4605
4606 /*
4607 * wake_up_new_task - wake up a newly created task for the first time.
4608 *
4609 * This function will do some initial scheduler statistics housekeeping
4610 * that must be done for every newly created context, then puts the task
4611 * on the runqueue and wakes it.
4612 */
4613 void wake_up_new_task(struct task_struct *p)
4614 {
4615 struct rq_flags rf;
4616 struct rq *rq;
4617
4618 trace_android_rvh_wake_up_new_task(p);
4619
4620 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4621 WRITE_ONCE(p->__state, TASK_RUNNING);
4622 #ifdef CONFIG_SMP
4623 /*
4624 * Fork balancing, do it here and not earlier because:
4625 * - cpus_ptr can change in the fork path
4626 * - any previously selected CPU might disappear through hotplug
4627 *
4628 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4629 * as we're not fully set-up yet.
4630 */
4631 p->recent_used_cpu = task_cpu(p);
4632 rseq_migrate(p);
4633 __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK));
4634 #endif
4635 rq = __task_rq_lock(p, &rf);
4636 update_rq_clock(rq);
4637 post_init_entity_util_avg(p);
4638 trace_android_rvh_new_task_stats(p);
4639
4640 activate_task(rq, p, ENQUEUE_NOCLOCK);
4641 trace_sched_wakeup_new(p);
4642 check_preempt_curr(rq, p, WF_FORK);
4643 #ifdef CONFIG_SMP
4644 if (p->sched_class->task_woken) {
4645 /*
4646 * Nothing relies on rq->lock after this, so it's fine to
4647 * drop it.
4648 */
4649 rq_unpin_lock(rq, &rf);
4650 p->sched_class->task_woken(rq, p);
4651 rq_repin_lock(rq, &rf);
4652 }
4653 #endif
4654 task_rq_unlock(rq, p, &rf);
4655 }
4656
4657 #ifdef CONFIG_PREEMPT_NOTIFIERS
4658
4659 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4660
4661 void preempt_notifier_inc(void)
4662 {
4663 static_branch_inc(&preempt_notifier_key);
4664 }
4665 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4666
4667 void preempt_notifier_dec(void)
4668 {
4669 static_branch_dec(&preempt_notifier_key);
4670 }
4671 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4672
4673 /**
4674 * preempt_notifier_register - tell me when current is being preempted & rescheduled
4675 * @notifier: notifier struct to register
4676 */
4677 void preempt_notifier_register(struct preempt_notifier *notifier)
4678 {
4679 if (!static_branch_unlikely(&preempt_notifier_key))
4680 WARN(1, "registering preempt_notifier while notifiers disabled\n");
4681
4682 hlist_add_head(&notifier->link, &current->preempt_notifiers);
4683 }
4684 EXPORT_SYMBOL_GPL(preempt_notifier_register);
4685
4686 /**
4687 * preempt_notifier_unregister - no longer interested in preemption notifications
4688 * @notifier: notifier struct to unregister
4689 *
4690 * This is *not* safe to call from within a preemption notifier.
4691 */
4692 void preempt_notifier_unregister(struct preempt_notifier *notifier)
4693 {
4694 hlist_del(&notifier->link);
4695 }
4696 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
4697
4698 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4699 {
4700 struct preempt_notifier *notifier;
4701
4702 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4703 notifier->ops->sched_in(notifier, raw_smp_processor_id());
4704 }
4705
4706 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4707 {
4708 if (static_branch_unlikely(&preempt_notifier_key))
4709 __fire_sched_in_preempt_notifiers(curr);
4710 }
4711
4712 static void
4713 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
4714 struct task_struct *next)
4715 {
4716 struct preempt_notifier *notifier;
4717
4718 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4719 notifier->ops->sched_out(notifier, next);
4720 }
4721
4722 static __always_inline void
4723 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4724 struct task_struct *next)
4725 {
4726 if (static_branch_unlikely(&preempt_notifier_key))
4727 __fire_sched_out_preempt_notifiers(curr, next);
4728 }
4729
4730 #else /* !CONFIG_PREEMPT_NOTIFIERS */
4731
4732 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4733 {
4734 }
4735
4736 static inline void
4737 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4738 struct task_struct *next)
4739 {
4740 }
4741
4742 #endif /* CONFIG_PREEMPT_NOTIFIERS */
4743
4744 static inline void prepare_task(struct task_struct *next)
4745 {
4746 #ifdef CONFIG_SMP
4747 /*
4748 * Claim the task as running, we do this before switching to it
4749 * such that any running task will have this set.
4750 *
4751 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4752 * its ordering comment.
4753 */
4754 WRITE_ONCE(next->on_cpu, 1);
4755 #endif
4756 }
4757
4758 static inline void finish_task(struct task_struct *prev)
4759 {
4760 #ifdef CONFIG_SMP
4761 /*
4762 * This must be the very last reference to @prev from this CPU. After
4763 * p->on_cpu is cleared, the task can be moved to a different CPU. We
4764 * must ensure this doesn't happen until the switch is completely
4765 * finished.
4766 *
4767 * In particular, the load of prev->state in finish_task_switch() must
4768 * happen before this.
4769 *
4770 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
4771 */
4772 smp_store_release(&prev->on_cpu, 0);
4773 #endif
4774 }
4775
4776 #ifdef CONFIG_SMP
4777
4778 static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
4779 {
4780 void (*func)(struct rq *rq);
4781 struct callback_head *next;
4782
4783 lockdep_assert_rq_held(rq);
4784
4785 while (head) {
4786 func = (void (*)(struct rq *))head->func;
4787 next = head->next;
4788 head->next = NULL;
4789 head = next;
4790
4791 func(rq);
4792 }
4793 }
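/*
 * Illustrative only (the callback below is hypothetical): work for
 * do_balance_callbacks() is normally queued from within a rq->lock
 * section via queue_balance_callback() from sched.h:
 */
#if 0
static DEFINE_PER_CPU(struct callback_head, sample_cb_head);

static void sample_balance_fn(struct rq *rq)
{
	/* Runs later, once it is safe to drop and retake rq->lock. */
}

static void sample_queue_cb(struct rq *rq)	/* caller holds rq->lock */
{
	queue_balance_callback(rq, &per_cpu(sample_cb_head, cpu_of(rq)),
			       sample_balance_fn);
}
#endif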
4794
4795 static void balance_push(struct rq *rq);
4796
4797 /*
4798 * balance_push_callback is a right abuse of the callback interface and plays
4799 * by significantly different rules.
4800 *
4801 * Where the normal balance_callback's purpose is to be run in the same context
4802 * that queued it (only later, when it's safe to drop rq->lock again),
4803 * balance_push_callback is specifically targeted at __schedule().
4804 *
4805 * This abuse is tolerated because it places all the unlikely/odd cases behind
4806 * a single test, namely: rq->balance_callback == NULL.
4807 */
4808 struct callback_head balance_push_callback = {
4809 .next = NULL,
4810 .func = (void (*)(struct callback_head *))balance_push,
4811 };
4812 EXPORT_SYMBOL_GPL(balance_push_callback);
4813
4814 static inline struct callback_head *
4815 __splice_balance_callbacks(struct rq *rq, bool split)
4816 {
4817 struct callback_head *head = rq->balance_callback;
4818
4819 if (likely(!head))
4820 return NULL;
4821
4822 lockdep_assert_rq_held(rq);
4823 /*
4824 * Must not take balance_push_callback off the list when
4825 * splice_balance_callbacks() and balance_callbacks() are not
4826 * in the same rq->lock section.
4827 *
4828 * In that case it would be possible for __schedule() to interleave
4829 * and observe the list empty.
4830 */
4831 if (split && head == &balance_push_callback)
4832 head = NULL;
4833 else
4834 rq->balance_callback = NULL;
4835
4836 return head;
4837 }
4838
4839 static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
4840 {
4841 return __splice_balance_callbacks(rq, true);
4842 }
4843
4844 void __balance_callbacks(struct rq *rq)
4845 {
4846 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
4847 }
4848 EXPORT_SYMBOL_GPL(__balance_callbacks);
4849
4850 static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
4851 {
4852 unsigned long flags;
4853
4854 if (unlikely(head)) {
4855 raw_spin_rq_lock_irqsave(rq, flags);
4856 do_balance_callbacks(rq, head);
4857 raw_spin_rq_unlock_irqrestore(rq, flags);
4858 }
4859 }
4860
4861 #else
4862
4863 static inline void __balance_callbacks(struct rq *rq)
4864 {
4865 }
4866
4867 static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
4868 {
4869 return NULL;
4870 }
4871
4872 static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
4873 {
4874 }
4875
4876 #endif
4877
4878 static inline void
4879 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
4880 {
4881 /*
4882 * The runqueue lock will be released by the next
4883 * task (which is an invalid locking op, but in the case
4884 * of the scheduler it's an obvious special case), so we
4885 * do an early lockdep release here:
4886 */
4887 rq_unpin_lock(rq, rf);
4888 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
4889 #ifdef CONFIG_DEBUG_SPINLOCK
4890 /* this is a valid case when another task releases the spinlock */
4891 rq_lockp(rq)->owner = next;
4892 #endif
4893 }
4894
4895 static inline void finish_lock_switch(struct rq *rq)
4896 {
4897 /*
4898 * If we are tracking spinlock dependencies then we have to
4899 * fix up the runqueue lock - which gets 'carried over' from
4900 * prev into current:
4901 */
4902 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
4903 __balance_callbacks(rq);
4904 raw_spin_rq_unlock_irq(rq);
4905 }
4906
4907 /*
4908 * NOP if the arch has not defined these:
4909 */
4910
4911 #ifndef prepare_arch_switch
4912 # define prepare_arch_switch(next) do { } while (0)
4913 #endif
4914
4915 #ifndef finish_arch_post_lock_switch
4916 # define finish_arch_post_lock_switch() do { } while (0)
4917 #endif
4918
4919 static inline void kmap_local_sched_out(void)
4920 {
4921 #ifdef CONFIG_KMAP_LOCAL
4922 if (unlikely(current->kmap_ctrl.idx))
4923 __kmap_local_sched_out();
4924 #endif
4925 }
4926
4927 static inline void kmap_local_sched_in(void)
4928 {
4929 #ifdef CONFIG_KMAP_LOCAL
4930 if (unlikely(current->kmap_ctrl.idx))
4931 __kmap_local_sched_in();
4932 #endif
4933 }
4934
4935 /**
4936 * prepare_task_switch - prepare to switch tasks
4937 * @rq: the runqueue preparing to switch
4938 * @prev: the current task that is being switched out
4939 * @next: the task we are going to switch to.
4940 *
4941 * This is called with the rq lock held and interrupts off. It must
4942 * be paired with a subsequent finish_task_switch after the context
4943 * switch.
4944 *
4945 * prepare_task_switch sets up locking and calls architecture specific
4946 * hooks.
4947 */
4948 static inline void
4949 prepare_task_switch(struct rq *rq, struct task_struct *prev,
4950 struct task_struct *next)
4951 {
4952 kcov_prepare_switch(prev);
4953 sched_info_switch(rq, prev, next);
4954 perf_event_task_sched_out(prev, next);
4955 rseq_preempt(prev);
4956 fire_sched_out_preempt_notifiers(prev, next);
4957 kmap_local_sched_out();
4958 prepare_task(next);
4959 prepare_arch_switch(next);
4960 }
4961
4962 /**
4963 * finish_task_switch - clean up after a task-switch
4964 * @prev: the thread we just switched away from.
4965 *
4966 * finish_task_switch must be called after the context switch, paired
4967 * with a prepare_task_switch call before the context switch.
4968 * finish_task_switch will reconcile locking set up by prepare_task_switch,
4969 * and do any other architecture-specific cleanup actions.
4970 *
4971 * Note that we may have delayed dropping an mm in context_switch(). If
4972 * so, we finish that here outside of the runqueue lock. (Doing it
4973 * with the lock held can cause deadlocks; see schedule() for
4974 * details.)
4975 *
4976 * The context switch has flipped the stack from under us and restored the
4977 * local variables which were saved when this task called schedule() in the
4978 * past. prev == current is still correct but we need to recalculate this_rq
4979 * because prev may have moved to another CPU.
4980 */
4981 static struct rq *finish_task_switch(struct task_struct *prev)
4982 __releases(rq->lock)
4983 {
4984 struct rq *rq = this_rq();
4985 struct mm_struct *mm = rq->prev_mm;
4986 long prev_state;
4987
4988 /*
4989 * The previous task will have left us with a preempt_count of 2
4990 * because it left us after:
4991 *
4992 * schedule()
4993 * preempt_disable(); // 1
4994 * __schedule()
4995 * raw_spin_lock_irq(&rq->lock) // 2
4996 *
4997 * Also, see FORK_PREEMPT_COUNT.
4998 */
4999 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5000 "corrupted preempt_count: %s/%d/0x%x\n",
5001 current->comm, current->pid, preempt_count()))
5002 preempt_count_set(FORK_PREEMPT_COUNT);
5003
5004 rq->prev_mm = NULL;
5005
5006 /*
5007 * A task struct has one reference for the use as "current".
5008 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5009 * schedule one last time. The schedule call will never return, and
5010 * the scheduled task must drop that reference.
5011 *
5012 * We must observe prev->state before clearing prev->on_cpu (in
5013 * finish_task), otherwise a concurrent wakeup can get prev
5014 * running on another CPU and we could race with its RUNNING -> DEAD
5015 * transition, resulting in a double drop.
5016 */
5017 prev_state = READ_ONCE(prev->__state);
5018 vtime_task_switch(prev);
5019 perf_event_task_sched_in(prev, current);
5020 finish_task(prev);
5021 tick_nohz_task_switch();
5022 finish_lock_switch(rq);
5023 finish_arch_post_lock_switch();
5024 kcov_finish_switch(current);
5025 /*
5026 * kmap_local_sched_out() is invoked with rq::lock held and
5027 * interrupts disabled. There is no requirement for that, but the
5028 * sched out code does not have an interrupt enabled section.
5029 * Restoring the maps on sched in does not require interrupts being
5030 * disabled either.
5031 */
5032 kmap_local_sched_in();
5033
5034 fire_sched_in_preempt_notifiers(current);
5035 /*
5036 * When switching through a kernel thread, the loop in
5037 * membarrier_{private,global}_expedited() may have observed that
5038 * kernel thread and not issued an IPI. It is therefore possible to
5039 * schedule between user->kernel->user threads without passing through
5040 * switch_mm(). Membarrier requires a barrier after storing to
5041 * rq->curr, before returning to userspace, so provide them here:
5042 *
5043 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5044 * provided by mmdrop(),
5045 * - a sync_core for SYNC_CORE.
5046 */
5047 if (mm) {
5048 membarrier_mm_sync_core_before_usermode(mm);
5049 mmdrop(mm);
5050 }
5051 if (unlikely(prev_state == TASK_DEAD)) {
5052 if (prev->sched_class->task_dead)
5053 prev->sched_class->task_dead(prev);
5054
5055 /*
5056 * Remove function-return probe instances associated with this
5057 * task and put them back on the free list.
5058 */
5059 kprobe_flush_task(prev);
5060 trace_android_rvh_flush_task(prev);
5061
5062 /* Task is done with its stack. */
5063 put_task_stack(prev);
5064
5065 put_task_struct_rcu_user(prev);
5066 }
5067
5068 return rq;
5069 }
5070
5071 /**
5072 * schedule_tail - first thing a freshly forked thread must call.
5073 * @prev: the thread we just switched away from.
5074 */
5075 asmlinkage __visible void schedule_tail(struct task_struct *prev)
5076 __releases(rq->lock)
5077 {
5078 /*
5079 * New tasks start with FORK_PREEMPT_COUNT, see there and
5080 * finish_task_switch() for details.
5081 *
5082 * finish_task_switch() will drop rq->lock and lower preempt_count
5083 * and the preempt_enable() will end up enabling preemption (on
5084 * PREEMPT_COUNT kernels).
5085 */
5086
5087 finish_task_switch(prev);
5088 preempt_enable();
5089
5090 if (current->set_child_tid)
5091 put_user(task_pid_vnr(current), current->set_child_tid);
5092
5093 calculate_sigpending();
5094 }
5095
5096 /*
5097 * context_switch - switch to the new MM and the new thread's register state.
5098 */
5099 static __always_inline struct rq *
5100 context_switch(struct rq *rq, struct task_struct *prev,
5101 struct task_struct *next, struct rq_flags *rf)
5102 {
5103 prepare_task_switch(rq, prev, next);
5104
5105 /*
5106 * For paravirt, this is coupled with an exit in switch_to to
5107 * combine the page table reload and the switch backend into
5108 * one hypercall.
5109 */
5110 arch_start_context_switch(prev);
5111
5112 /*
5113 * kernel -> kernel lazy + transfer active
5114 * user -> kernel lazy + mmgrab() active
5115 *
5116 * kernel -> user switch + mmdrop() active
5117 * user -> user switch
5118 */
5119 if (!next->mm) { // to kernel
5120 enter_lazy_tlb(prev->active_mm, next);
5121
5122 next->active_mm = prev->active_mm;
5123 if (prev->mm) // from user
5124 mmgrab(prev->active_mm);
5125 else
5126 prev->active_mm = NULL;
5127 } else { // to user
5128 membarrier_switch_mm(rq, prev->active_mm, next->mm);
5129 /*
5130 * sys_membarrier() requires an smp_mb() between setting
5131 * rq->curr / membarrier_switch_mm() and returning to userspace.
5132 *
5133 * The below provides this either through switch_mm(), or in
5134 * case 'prev->active_mm == next->mm' through
5135 * finish_task_switch()'s mmdrop().
5136 */
5137 switch_mm_irqs_off(prev->active_mm, next->mm, next);
5138 lru_gen_use_mm(next->mm);
5139
5140 if (!prev->mm) { // from kernel
5141 /* will mmdrop() in finish_task_switch(). */
5142 rq->prev_mm = prev->active_mm;
5143 prev->active_mm = NULL;
5144 }
5145 }
5146
5147 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
5148
5149 prepare_lock_switch(rq, next, rf);
5150
5151 /* Here we just switch the register state and the stack. */
5152 switch_to(prev, next, prev);
5153 barrier();
5154
5155 return finish_task_switch(prev);
5156 }
5157
5158 /*
5159 * nr_running and nr_context_switches:
5160 *
5161 * externally visible scheduler statistics: current number of runnable
5162 * threads, total number of context switches performed since bootup.
5163 */
5164 unsigned int nr_running(void)
5165 {
5166 unsigned int i, sum = 0;
5167
5168 for_each_online_cpu(i)
5169 sum += cpu_rq(i)->nr_running;
5170
5171 return sum;
5172 }
5173 EXPORT_SYMBOL(nr_running);
5174
5175 /*
5176 * Check if only the current task is running on the CPU.
5177 *
5178 * Caution: this function does not check that the caller has disabled
5179 * preemption, thus the result might have a time-of-check-to-time-of-use
5180 * race. The caller is responsible for using it correctly, for example:
5181 *
5182 * - from a non-preemptible section (of course)
5183 *
5184 * - from a thread that is bound to a single CPU
5185 *
5186 * - in a loop with very short iterations (e.g. a polling loop)
5187 */
5188 bool single_task_running(void)
5189 {
5190 return raw_rq()->nr_running == 1;
5191 }
5192 EXPORT_SYMBOL(single_task_running);
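/*
 * Illustrative use, per the caveats above (hypothetical helper, only
 * meaningful from a context that cannot migrate, e.g. a kthread bound
 * to one CPU):
 */
#if 0
static void sample_wait_until_alone(void)
{
	while (!single_task_running())
		cpu_relax();	/* poll until current is alone on this CPU */
}
#endif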
5193
5194 unsigned long long nr_context_switches(void)
5195 {
5196 int i;
5197 unsigned long long sum = 0;
5198
5199 for_each_possible_cpu(i)
5200 sum += cpu_rq(i)->nr_switches;
5201
5202 return sum;
5203 }
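/* This sum is what the "ctxt" line of /proc/stat reports. */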
5204
5205 /*
5206 * Consumers of these two interfaces, like for example the cpuidle menu
5207 * governor, are using nonsensical data: they prefer shallow idle state
5208 * selection for a CPU that has IO-wait, even though the blocked task might
5209 * not end up running on that CPU when it does become runnable.
5210 */
5211
5212 unsigned int nr_iowait_cpu(int cpu)
5213 {
5214 return atomic_read(&cpu_rq(cpu)->nr_iowait);
5215 }
5216
5217 /*
5218 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5219 *
5220 * The idea behind IO-wait accounting is to account the idle time that we could
5221 * have spent running if it were not for IO. That is, if we were to improve the
5222 * storage performance, we'd have a proportional reduction in IO-wait time.
5223 *
5224 * This all works nicely on UP, where, when a task blocks on IO, we account
5225 * idle time as IO-wait, because if the storage were faster, it could've been
5226 * running and we'd not be idle.
5227 *
5228 * This has been extended to SMP, by doing the same for each CPU. This however
5229 * is broken.
5230 *
5231 * Imagine for instance the case where two tasks block on one CPU, only the one
5232 * CPU will have IO-wait accounted, while the other has regular idle. Even
5233 * though, if the storage were faster, both could've run at the same time,
5234 * utilising both CPUs.
5235 *
5236 * This means, that when looking globally, the current IO-wait accounting on
5237 * SMP is a lower bound, by reason of under accounting.
5238 *
5239 * Worse, since the numbers are provided per CPU, they are sometimes
5240 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5241 * associated with any one particular CPU, it can wake to another CPU than it
5242 * blocked on. This means the per CPU IO-wait number is meaningless.
5243 *
5244 * Task CPU affinities can make all that even more 'interesting'.
5245 */
5246
5247 unsigned int nr_iowait(void)
5248 {
5249 unsigned int i, sum = 0;
5250
5251 for_each_possible_cpu(i)
5252 sum += nr_iowait_cpu(i);
5253
5254 return sum;
5255 }
5256
5257 #ifdef CONFIG_SMP
5258
5259 /*
5260 * sched_exec - execve() is a valuable balancing opportunity, because at
5261 * this point the task has the smallest effective memory and cache footprint.
5262 */
5263 void sched_exec(void)
5264 {
5265 struct task_struct *p = current;
5266 unsigned long flags;
5267 int dest_cpu;
5268 bool cond = false;
5269
5270 trace_android_rvh_sched_exec(&cond);
5271 if (cond)
5272 return;
5273
5274 raw_spin_lock_irqsave(&p->pi_lock, flags);
5275 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5276 if (dest_cpu == smp_processor_id())
5277 goto unlock;
5278
5279 if (likely(cpu_active(dest_cpu))) {
5280 struct migration_arg arg = { p, dest_cpu };
5281
5282 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5283 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5284 return;
5285 }
5286 unlock:
5287 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5288 }
5289
5290 #endif
5291
5292 DEFINE_PER_CPU(struct kernel_stat, kstat);
5293 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5294
5295 EXPORT_PER_CPU_SYMBOL(kstat);
5296 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5297
5298 /*
5299 * The function fair_sched_class.update_curr accesses the struct curr
5300 * and its field curr->exec_start; when called from task_sched_runtime(),
5301 * we observe a high rate of cache misses in practice.
5302 * Prefetching this data results in improved performance.
5303 */
5304 static inline void prefetch_curr_exec_start(struct task_struct *p)
5305 {
5306 #ifdef CONFIG_FAIR_GROUP_SCHED
5307 struct sched_entity *curr = (&p->se)->cfs_rq->curr;
5308 #else
5309 struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
5310 #endif
5311 prefetch(curr);
5312 prefetch(&curr->exec_start);
5313 }
5314
5315 /*
5316 * Return accounted runtime for the task.
5317 * In case the task is currently running, return the runtime plus current's
5318 * pending runtime that have not been accounted yet.
5319 */
5320 unsigned long long task_sched_runtime(struct task_struct *p)
5321 {
5322 struct rq_flags rf;
5323 struct rq *rq;
5324 u64 ns;
5325
5326 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
5327 /*
5328 * 64-bit doesn't need locks to atomically read a 64-bit value.
5329 * So we have an optimization chance when the task's delta_exec is 0.
5330 * Reading ->on_cpu is racy, but this is ok.
5331 *
5332 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5333 * If we race with it entering CPU, unaccounted time is 0. This is
5334 * indistinguishable from the read occurring a few cycles earlier.
5335 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5336 * been accounted, so we're correct here as well.
5337 */
5338 if (!p->on_cpu || !task_on_rq_queued(p))
5339 return p->se.sum_exec_runtime;
5340 #endif
5341
5342 rq = task_rq_lock(p, &rf);
5343 /*
5344 * Must be ->curr _and_ ->on_rq. If dequeued, we would
5345 * project cycles that may never be accounted to this
5346 * thread, breaking clock_gettime().
5347 */
5348 if (task_current(rq, p) && task_on_rq_queued(p)) {
5349 prefetch_curr_exec_start(p);
5350 update_rq_clock(rq);
5351 p->sched_class->update_curr(rq);
5352 }
5353 ns = p->se.sum_exec_runtime;
5354 task_rq_unlock(rq, p, &rf);
5355
5356 return ns;
5357 }
5358 EXPORT_SYMBOL_GPL(task_sched_runtime);
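/*
 * E.g. clock_gettime(CLOCK_THREAD_CPUTIME_ID) samples this value via
 * the posix-cpu-timers code.
 */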
5359
5360 #ifdef CONFIG_SCHED_DEBUG
5361 static u64 cpu_resched_latency(struct rq *rq)
5362 {
5363 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5364 u64 resched_latency, now = rq_clock(rq);
5365 static bool warned_once;
5366
5367 if (sysctl_resched_latency_warn_once && warned_once)
5368 return 0;
5369
5370 if (!need_resched() || !latency_warn_ms)
5371 return 0;
5372
5373 if (system_state == SYSTEM_BOOTING)
5374 return 0;
5375
5376 if (!rq->last_seen_need_resched_ns) {
5377 rq->last_seen_need_resched_ns = now;
5378 rq->ticks_without_resched = 0;
5379 return 0;
5380 }
5381
5382 rq->ticks_without_resched++;
5383 resched_latency = now - rq->last_seen_need_resched_ns;
5384 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5385 return 0;
5386
5387 warned_once = true;
5388
5389 return resched_latency;
5390 }
5391
5392 static int __init setup_resched_latency_warn_ms(char *str)
5393 {
5394 long val;
5395
5396 if (kstrtol(str, 0, &val)) {
5397 pr_warn("Unable to set resched_latency_warn_ms\n");
5398 return 1;
5399 }
5400
5401 sysctl_resched_latency_warn_ms = val;
5402 return 1;
5403 }
5404 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
5405 #else
5406 static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
5407 #endif /* CONFIG_SCHED_DEBUG */
5408
5409 /*
5410 * This function gets called by the timer code, with HZ frequency.
5411 * We call it with interrupts disabled.
5412 */
5413 void scheduler_tick(void)
5414 {
5415 int cpu = smp_processor_id();
5416 struct rq *rq = cpu_rq(cpu);
5417 struct task_struct *curr = rq->curr;
5418 struct rq_flags rf;
5419 unsigned long thermal_pressure;
5420 u64 resched_latency;
5421
5422 arch_scale_freq_tick();
5423 sched_clock_tick();
5424
5425 rq_lock(rq, &rf);
5426
5427 update_rq_clock(rq);
5428 trace_android_rvh_tick_entry(rq);
5429
5430 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
5431 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
5432 curr->sched_class->task_tick(rq, curr, 0);
5433 if (sched_feat(LATENCY_WARN))
5434 resched_latency = cpu_resched_latency(rq);
5435 calc_global_load_tick(rq);
5436
5437 rq_unlock(rq, &rf);
5438
5439 if (sched_feat(LATENCY_WARN) && resched_latency)
5440 resched_latency_warn(cpu, resched_latency);
5441
5442 perf_event_task_tick();
5443
5444 #ifdef CONFIG_SMP
5445 rq->idle_balance = idle_cpu(cpu);
5446 trigger_load_balance(rq);
5447 #endif
5448
5449 trace_android_vh_scheduler_tick(rq);
5450 }
5451
5452 #ifdef CONFIG_NO_HZ_FULL
5453
5454 struct tick_work {
5455 int cpu;
5456 atomic_t state;
5457 struct delayed_work work;
5458 };
5459 /* Values for ->state, see diagram below. */
5460 #define TICK_SCHED_REMOTE_OFFLINE 0
5461 #define TICK_SCHED_REMOTE_OFFLINING 1
5462 #define TICK_SCHED_REMOTE_RUNNING 2
5463
5464 /*
5465 * State diagram for ->state:
5466 *
5467 *
5468 * TICK_SCHED_REMOTE_OFFLINE
5469 * | ^
5470 * | |
5471 * | | sched_tick_remote()
5472 * | |
5473 * | |
5474 * +--TICK_SCHED_REMOTE_OFFLINING
5475 * | ^
5476 * | |
5477 * sched_tick_start() | | sched_tick_stop()
5478 * | |
5479 * V |
5480 * TICK_SCHED_REMOTE_RUNNING
5481 *
5482 *
5483 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5484 * and sched_tick_start() are happy to leave the state in RUNNING.
5485 */
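/*
 * Example walk-through (illustrative): on CPU offline, sched_tick_stop()
 * moves RUNNING -> OFFLINING; the next sched_tick_remote() run sees the
 * state is no longer RUNNING, decrements it to OFFLINE and does not requeue
 * itself. When the CPU comes back, sched_tick_start() swaps
 * OFFLINE -> RUNNING and queues fresh work.
 */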
5486
5487 static struct tick_work __percpu *tick_work_cpu;
5488
5489 static void sched_tick_remote(struct work_struct *work)
5490 {
5491 struct delayed_work *dwork = to_delayed_work(work);
5492 struct tick_work *twork = container_of(dwork, struct tick_work, work);
5493 int cpu = twork->cpu;
5494 struct rq *rq = cpu_rq(cpu);
5495 struct task_struct *curr;
5496 struct rq_flags rf;
5497 u64 delta;
5498 int os;
5499
5500 /*
5501 * Handle the tick only if it appears the remote CPU is running in full
5502 * dynticks mode. The check is racy by nature, but missing a tick or
5503 * having one too many is no big deal because the scheduler tick updates
5504 * statistics and checks timeslices in a time-independent way, regardless
5505 * of when exactly it is running.
5506 */
5507 if (!tick_nohz_tick_stopped_cpu(cpu))
5508 goto out_requeue;
5509
5510 rq_lock_irq(rq, &rf);
5511 curr = rq->curr;
5512 if (cpu_is_offline(cpu))
5513 goto out_unlock;
5514
5515 update_rq_clock(rq);
5516
5517 if (!is_idle_task(curr)) {
5518 /*
5519 * Make sure the next tick runs within a reasonable
5520 * amount of time.
5521 */
5522 delta = rq_clock_task(rq) - curr->se.exec_start;
5523 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5524 }
5525 curr->sched_class->task_tick(rq, curr, 0);
5526
5527 calc_load_nohz_remote(rq);
5528 out_unlock:
5529 rq_unlock_irq(rq, &rf);
5530 out_requeue:
5531
5532 /*
5533 * Run the remote tick once per second (1Hz). This arbitrary
5534 * frequency is low enough to avoid overload but high enough
5535 * to keep scheduler internal stats reasonably up to date. But
5536 * first update state to reflect hotplug activity if required.
5537 */
5538 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5539 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5540 if (os == TICK_SCHED_REMOTE_RUNNING)
5541 queue_delayed_work(system_unbound_wq, dwork, HZ);
5542 }
5543
5544 static void sched_tick_start(int cpu)
5545 {
5546 int os;
5547 struct tick_work *twork;
5548
5549 if (housekeeping_cpu(cpu, HK_FLAG_TICK))
5550 return;
5551
5552 WARN_ON_ONCE(!tick_work_cpu);
5553
5554 twork = per_cpu_ptr(tick_work_cpu, cpu);
5555 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5556 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5557 if (os == TICK_SCHED_REMOTE_OFFLINE) {
5558 twork->cpu = cpu;
5559 INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5560 queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5561 }
5562 }
5563
5564 #ifdef CONFIG_HOTPLUG_CPU
5565 static void sched_tick_stop(int cpu)
5566 {
5567 struct tick_work *twork;
5568 int os;
5569
5570 if (housekeeping_cpu(cpu, HK_FLAG_TICK))
5571 return;
5572
5573 WARN_ON_ONCE(!tick_work_cpu);
5574
5575 twork = per_cpu_ptr(tick_work_cpu, cpu);
5576 /* There cannot be competing actions, but don't rely on stop-machine. */
5577 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5578 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5579 /* Don't cancel, as this would mess up the state machine. */
5580 }
5581 #endif /* CONFIG_HOTPLUG_CPU */
5582
5583 int __init sched_tick_offload_init(void)
5584 {
5585 tick_work_cpu = alloc_percpu(struct tick_work);
5586 BUG_ON(!tick_work_cpu);
5587 return 0;
5588 }
5589
5590 #else /* !CONFIG_NO_HZ_FULL */
5591 static inline void sched_tick_start(int cpu) { }
5592 static inline void sched_tick_stop(int cpu) { }
5593 #endif
5594
5595 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5596 defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5597 /*
5598 * If the value passed in is equal to the current preempt count
5599 * then we just disabled preemption. Start timing the latency.
5600 */
5601 static inline void preempt_latency_start(int val)
5602 {
5603 if (preempt_count() == val) {
5604 unsigned long ip = get_lock_parent_ip();
5605 #ifdef CONFIG_DEBUG_PREEMPT
5606 current->preempt_disable_ip = ip;
5607 #endif
5608 trace_preempt_off(CALLER_ADDR0, ip);
5609 }
5610 }
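/*
 * Illustrative: the outermost preempt_disable() takes preempt_count() from
 * 0 to 1 and ends up in preempt_latency_start(1), where
 * preempt_count() == val holds; for a nested preempt_disable() the count is
 * already 2, so nested sections do not restart the latency timing.
 */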
5611
5612 void preempt_count_add(int val)
5613 {
5614 #ifdef CONFIG_DEBUG_PREEMPT
5615 /*
5616 * Underflow?
5617 */
5618 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5619 return;
5620 #endif
5621 __preempt_count_add(val);
5622 #ifdef CONFIG_DEBUG_PREEMPT
5623 /*
5624 * Spinlock count overflowing soon?
5625 */
5626 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5627 PREEMPT_MASK - 10);
5628 #endif
5629 preempt_latency_start(val);
5630 }
5631 EXPORT_SYMBOL(preempt_count_add);
5632 NOKPROBE_SYMBOL(preempt_count_add);
5633
5634 /*
5635 * If the value passed in equals the current preempt count
5636 * then we just enabled preemption. Stop timing the latency.
5637 */
5638 static inline void preempt_latency_stop(int val)
5639 {
5640 if (preempt_count() == val)
5641 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5642 }
5643
5644 void preempt_count_sub(int val)
5645 {
5646 #ifdef CONFIG_DEBUG_PREEMPT
5647 /*
5648 * Underflow?
5649 */
5650 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5651 return;
5652 /*
5653 * Is the spinlock portion underflowing?
5654 */
5655 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5656 !(preempt_count() & PREEMPT_MASK)))
5657 return;
5658 #endif
5659
5660 preempt_latency_stop(val);
5661 __preempt_count_sub(val);
5662 }
5663 EXPORT_SYMBOL(preempt_count_sub);
5664 NOKPROBE_SYMBOL(preempt_count_sub);
5665
5666 #else
5667 static inline void preempt_latency_start(int val) { }
5668 static inline void preempt_latency_stop(int val) { }
5669 #endif
5670
5671 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5672 {
5673 #ifdef CONFIG_DEBUG_PREEMPT
5674 return p->preempt_disable_ip;
5675 #else
5676 return 0;
5677 #endif
5678 }
5679
5680 /*
5681 * Print scheduling while atomic bug:
5682 */
5683 static noinline void __schedule_bug(struct task_struct *prev)
5684 {
5685 /* Save this before calling printk(), since that will clobber it */
5686 unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5687
5688 if (oops_in_progress)
5689 return;
5690
5691 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5692 prev->comm, prev->pid, preempt_count());
5693
5694 debug_show_held_locks(prev);
5695 print_modules();
5696 if (irqs_disabled())
5697 print_irqtrace_events(prev);
5698 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
5699 && in_atomic_preempt_off()) {
5700 pr_err("Preemption disabled at:");
5701 print_ip_sym(KERN_ERR, preempt_disable_ip);
5702 }
5703 check_panic_on_warn("scheduling while atomic");
5704
5705 trace_android_rvh_schedule_bug(prev);
5706
5707 dump_stack();
5708 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5709 }
5710
5711 /*
5712 * Various schedule()-time debugging checks and statistics:
5713 */
5714 static inline void schedule_debug(struct task_struct *prev, bool preempt)
5715 {
5716 #ifdef CONFIG_SCHED_STACK_END_CHECK
5717 if (task_stack_end_corrupted(prev))
5718 panic("corrupted stack end detected inside scheduler\n");
5719
5720 if (task_scs_end_corrupted(prev))
5721 panic("corrupted shadow stack detected inside scheduler\n");
5722 #endif
5723
5724 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5725 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5726 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5727 prev->comm, prev->pid, prev->non_block_count);
5728 dump_stack();
5729 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5730 }
5731 #endif
5732
5733 if (unlikely(in_atomic_preempt_off())) {
5734 __schedule_bug(prev);
5735 preempt_count_set(PREEMPT_DISABLED);
5736 }
5737 rcu_sleep_check();
5738 SCHED_WARN_ON(ct_state() == CONTEXT_USER);
5739
5740 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5741
5742 schedstat_inc(this_rq()->sched_count);
5743 }
5744
5745 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
5746 struct rq_flags *rf)
5747 {
5748 #ifdef CONFIG_SMP
5749 const struct sched_class *class;
5750 /*
5751 * We must do the balancing pass before put_prev_task(), such
5752 * that when we release the rq->lock the task is in the same
5753 * state as before we took rq->lock.
5754 *
5755 * We can terminate the balance pass as soon as we know there is
5756 * a runnable task of @class priority or higher.
5757 */
5758 for_class_range(class, prev->sched_class, &idle_sched_class) {
5759 if (class->balance(rq, prev, rf))
5760 break;
5761 }
5762 #endif
5763
5764 put_prev_task(rq, prev);
5765 }
5766
5767 /*
5768 * Pick up the highest-prio task:
5769 */
5770 static inline struct task_struct *
5771 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5772 {
5773 const struct sched_class *class;
5774 struct task_struct *p;
5775
5776 /*
5777 * Optimization: we know that if all tasks are in the fair class we can
5778 * call that function directly, but only if the @prev task wasn't of a
5779 * higher scheduling class, because otherwise those lose the
5780 * opportunity to pull in more work from other CPUs.
5781 */
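/*
 * Illustrative: the address comparison below works because the sched_class
 * structs are emitted in a dedicated section in priority order, so
 * "prev->sched_class <= &fair_sched_class" means prev was in the fair or
 * idle class; the nr_running check then guarantees every runnable task on
 * this rq is a CFS task.
 */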
5782 if (likely(prev->sched_class <= &fair_sched_class &&
5783 rq->nr_running == rq->cfs.h_nr_running)) {
5784
5785 p = pick_next_task_fair(rq, prev, rf);
5786 if (unlikely(p == RETRY_TASK))
5787 goto restart;
5788
5789 /* Assume the next prioritized class is idle_sched_class */
5790 if (!p) {
5791 put_prev_task(rq, prev);
5792 p = pick_next_task_idle(rq);
5793 }
5794
5795 return p;
5796 }
5797
5798 restart:
5799 put_prev_task_balance(rq, prev, rf);
5800
5801 for_each_class(class) {
5802 p = class->pick_next_task(rq);
5803 if (p)
5804 return p;
5805 }
5806
5807 /* The idle class should always have a runnable task: */
5808 BUG();
5809 }
5810
5811 #ifdef CONFIG_SCHED_CORE
5812 static inline bool is_task_rq_idle(struct task_struct *t)
5813 {
5814 return (task_rq(t)->idle == t);
5815 }
5816
5817 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
5818 {
5819 return is_task_rq_idle(a) || (a->core_cookie == cookie);
5820 }
5821
5822 static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
5823 {
5824 if (is_task_rq_idle(a) || is_task_rq_idle(b))
5825 return true;
5826
5827 return a->core_cookie == b->core_cookie;
5828 }
5829
5830 // XXX fairness/fwd progress conditions
5831 /*
5832 * Returns
5833 * - NULL if there is no runnable task for this class.
5834 * - the highest priority task for this runqueue if it matches
5835 * rq->core->core_cookie or its priority is greater than max.
5836 * - Else returns idle_task.
5837 */
5838 static struct task_struct *
5839 pick_task(struct rq *rq, const struct sched_class *class, struct task_struct *max, bool in_fi)
5840 {
5841 struct task_struct *class_pick, *cookie_pick;
5842 unsigned long cookie = rq->core->core_cookie;
5843
5844 class_pick = class->pick_task(rq);
5845 if (!class_pick)
5846 return NULL;
5847
5848 if (!cookie) {
5849 /*
5850 * If class_pick is tagged, return it only if it has
5851 * higher priority than max.
5852 */
5853 if (max && class_pick->core_cookie &&
5854 prio_less(class_pick, max, in_fi))
5855 return idle_sched_class.pick_task(rq);
5856
5857 return class_pick;
5858 }
5859
5860 /*
5861 * If class_pick is idle or matches cookie, return early.
5862 */
5863 if (cookie_equals(class_pick, cookie))
5864 return class_pick;
5865
5866 cookie_pick = sched_core_find(rq, cookie);
5867
5868 /*
5869 * If class > max && class > cookie, it is the highest priority task on
5870 * the core (so far) and it must be selected, otherwise we must go with
5871 * the cookie pick in order to satisfy the constraint.
5872 */
5873 if (prio_less(cookie_pick, class_pick, in_fi) &&
5874 (!max || prio_less(max, class_pick, in_fi)))
5875 return class_pick;
5876
5877 return cookie_pick;
5878 }
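/*
 * Illustrative scenario: with a core-wide cookie C, a class pick carrying
 * cookie C is returned as-is; a pick with a different cookie is returned
 * only if it beats both the cookie pick and the current core-wide max
 * (forcing the siblings to be re-picked); otherwise the best cookie-C task
 * found by sched_core_find() (possibly the idle task) wins.
 */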
5879
5880 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
5881
5882 static struct task_struct *
5883 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5884 {
5885 struct task_struct *next, *max = NULL;
5886 const struct sched_class *class;
5887 const struct cpumask *smt_mask;
5888 bool fi_before = false;
5889 int i, j, cpu, occ = 0;
5890 bool need_sync;
5891
5892 if (!sched_core_enabled(rq))
5893 return __pick_next_task(rq, prev, rf);
5894
5895 cpu = cpu_of(rq);
5896
5897 /* Stopper task is switching into idle, no need for core-wide selection. */
5898 if (cpu_is_offline(cpu)) {
5899 /*
5900 * Reset core_pick so that we don't enter the fastpath when
5901 * coming online. core_pick would already be migrated to
5902 * another cpu during offline.
5903 */
5904 rq->core_pick = NULL;
5905 return __pick_next_task(rq, prev, rf);
5906 }
5907
5908 /*
5909 * If there were no {en,de}queues since we picked (IOW, the task
5910 * pointers are all still valid), and we haven't scheduled the last
5911 * pick yet, do so now.
5912 *
5913 * rq->core_pick can be NULL if no selection was made for a CPU because
5914 * it was either offline or went offline during a sibling's core-wide
5915 * selection. In this case, do a core-wide selection.
5916 */
5917 if (rq->core->core_pick_seq == rq->core->core_task_seq &&
5918 rq->core->core_pick_seq != rq->core_sched_seq &&
5919 rq->core_pick) {
5920 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
5921
5922 next = rq->core_pick;
5923 if (next != prev) {
5924 put_prev_task(rq, prev);
5925 set_next_task(rq, next);
5926 }
5927
5928 rq->core_pick = NULL;
5929 return next;
5930 }
5931
5932 put_prev_task_balance(rq, prev, rf);
5933
5934 smt_mask = cpu_smt_mask(cpu);
5935 need_sync = !!rq->core->core_cookie;
5936
5937 /* reset state */
5938 rq->core->core_cookie = 0UL;
5939 if (rq->core->core_forceidle) {
5940 need_sync = true;
5941 fi_before = true;
5942 rq->core->core_forceidle = false;
5943 }
5944
5945 /*
5946 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
5947 *
5948 * @task_seq guards the task state ({en,de}queues)
5949 * @pick_seq is the @task_seq we did a selection on
5950 * @sched_seq is the @pick_seq we scheduled
5951 *
5952 * However, preemptions can cause multiple picks on the same task set.
5953 * 'Fix' this by also increasing @task_seq for every pick.
5954 */
5955 rq->core->core_task_seq++;
5956
5957 /*
5958 * Optimize for common case where this CPU has no cookies
5959 * and there are no cookied tasks running on siblings.
5960 */
5961 if (!need_sync) {
5962 for_each_class(class) {
5963 next = class->pick_task(rq);
5964 if (next)
5965 break;
5966 }
5967
5968 if (!next->core_cookie) {
5969 rq->core_pick = NULL;
5970 /*
5971 * For robustness, update the min_vruntime_fi for
5972 * unconstrained picks as well.
5973 */
5974 WARN_ON_ONCE(fi_before);
5975 task_vruntime_update(rq, next, false);
5976 goto done;
5977 }
5978 }
5979
5980 for_each_cpu(i, smt_mask) {
5981 struct rq *rq_i = cpu_rq(i);
5982
5983 rq_i->core_pick = NULL;
5984
5985 if (i != cpu)
5986 update_rq_clock(rq_i);
5987 }
5988
5989 /*
5990 * Try and select tasks for each sibling in descending sched_class
5991 * order.
5992 */
5993 for_each_class(class) {
5994 again:
5995 for_each_cpu_wrap(i, smt_mask, cpu) {
5996 struct rq *rq_i = cpu_rq(i);
5997 struct task_struct *p;
5998
5999 if (rq_i->core_pick)
6000 continue;
6001
6002 /*
6003 * If this sibling doesn't yet have a suitable task to
6004 * run, ask for the most eligible task, given the
6005 * highest priority task already selected for this
6006 * core.
6007 */
6008 p = pick_task(rq_i, class, max, fi_before);
6009 if (!p)
6010 continue;
6011
6012 if (!is_task_rq_idle(p))
6013 occ++;
6014
6015 rq_i->core_pick = p;
6016 if (rq_i->idle == p && rq_i->nr_running) {
6017 rq->core->core_forceidle = true;
6018 if (!fi_before)
6019 rq->core->core_forceidle_seq++;
6020 }
6021
6022 /*
6023 * If this new candidate is of higher priority than the
6024 * previous; and they're incompatible; we need to wipe
6025 * the slate and start over. pick_task makes sure that
6026 * p's priority is more than max if it doesn't match
6027 * max's cookie.
6028 *
6029 * NOTE: this is a linear max-filter and is thus bounded
6030 * in execution time.
6031 */
6032 if (!max || !cookie_match(max, p)) {
6033 struct task_struct *old_max = max;
6034
6035 rq->core->core_cookie = p->core_cookie;
6036 max = p;
6037
6038 if (old_max) {
6039 rq->core->core_forceidle = false;
6040 for_each_cpu(j, smt_mask) {
6041 if (j == i)
6042 continue;
6043
6044 cpu_rq(j)->core_pick = NULL;
6045 }
6046 occ = 1;
6047 goto again;
6048 }
6049 }
6050 }
6051 }
6052
6053 rq->core->core_pick_seq = rq->core->core_task_seq;
6054 next = rq->core_pick;
6055 rq->core_sched_seq = rq->core->core_pick_seq;
6056
6057 /* Something should have been selected for current CPU */
6058 WARN_ON_ONCE(!next);
6059
6060 /*
6061 * Reschedule siblings
6062 *
6063 * NOTE: L1TF -- at this point we're no longer running the old task and
6064 * sending an IPI (below) ensures the sibling will no longer be running
6065 * their task. This ensures there is no inter-sibling overlap between
6066 * non-matching user state.
6067 */
6068 for_each_cpu(i, smt_mask) {
6069 struct rq *rq_i = cpu_rq(i);
6070
6071 /*
6072 * An online sibling might have gone offline before a task
6073 * could be picked for it, or it might be offline but later
6074 * happen to come online, but it's too late and nothing was
6075 * picked for it. That's Ok - it will pick tasks for itself,
6076 * so ignore it.
6077 */
6078 if (!rq_i->core_pick)
6079 continue;
6080
6081 /*
6082 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6083 * fi_before fi update?
6084 * 0 0 1
6085 * 0 1 1
6086 * 1 0 1
6087 * 1 1 0
6088 */
6089 if (!(fi_before && rq->core->core_forceidle))
6090 task_vruntime_update(rq_i, rq_i->core_pick, rq->core->core_forceidle);
6091
6092 rq_i->core_pick->core_occupation = occ;
6093
6094 if (i == cpu) {
6095 rq_i->core_pick = NULL;
6096 continue;
6097 }
6098
6099 /* Did we break L1TF mitigation requirements? */
6100 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6101
6102 if (rq_i->curr == rq_i->core_pick) {
6103 rq_i->core_pick = NULL;
6104 continue;
6105 }
6106
6107 resched_curr(rq_i);
6108 }
6109
6110 done:
6111 set_next_task(rq, next);
6112 return next;
6113 }
6114
6115 static bool try_steal_cookie(int this, int that)
6116 {
6117 struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6118 struct task_struct *p;
6119 unsigned long cookie;
6120 bool success = false;
6121
6122 local_irq_disable();
6123 double_rq_lock(dst, src);
6124
6125 cookie = dst->core->core_cookie;
6126 if (!cookie)
6127 goto unlock;
6128
6129 if (dst->curr != dst->idle)
6130 goto unlock;
6131
6132 p = sched_core_find(src, cookie);
6133 if (p == src->idle)
6134 goto unlock;
6135
6136 do {
6137 if (p == src->core_pick || p == src->curr)
6138 goto next;
6139
6140 if (!is_cpu_allowed(p, this))
6141 goto next;
6142
6143 if (p->core_occupation > dst->idle->core_occupation)
6144 goto next;
6145
6146 deactivate_task(src, p, 0);
6147 set_task_cpu(p, this);
6148 activate_task(dst, p, 0);
6149
6150 resched_curr(dst);
6151
6152 success = true;
6153 break;
6154
6155 next:
6156 p = sched_core_next(p, cookie);
6157 } while (p);
6158
6159 unlock:
6160 double_rq_unlock(dst, src);
6161 local_irq_enable();
6162
6163 return success;
6164 }
6165
6166 static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6167 {
6168 int i;
6169
6170 for_each_cpu_wrap(i, sched_domain_span(sd), cpu) {
6171 if (i == cpu)
6172 continue;
6173
6174 if (need_resched())
6175 break;
6176
6177 if (try_steal_cookie(cpu, i))
6178 return true;
6179 }
6180
6181 return false;
6182 }
6183
6184 static void sched_core_balance(struct rq *rq)
6185 {
6186 struct sched_domain *sd;
6187 int cpu = cpu_of(rq);
6188
6189 preempt_disable();
6190 rcu_read_lock();
6191 raw_spin_rq_unlock_irq(rq);
6192 for_each_domain(cpu, sd) {
6193 if (need_resched())
6194 break;
6195
6196 if (steal_cookie_task(cpu, sd))
6197 break;
6198 }
6199 raw_spin_rq_lock_irq(rq);
6200 rcu_read_unlock();
6201 preempt_enable();
6202 }
6203
6204 static DEFINE_PER_CPU(struct callback_head, core_balance_head);
6205
6206 void queue_core_balance(struct rq *rq)
6207 {
6208 if (!sched_core_enabled(rq))
6209 return;
6210
6211 if (!rq->core->core_cookie)
6212 return;
6213
6214 if (!rq->nr_running) /* not forced idle */
6215 return;
6216
6217 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6218 }
6219
6220 static void sched_core_cpu_starting(unsigned int cpu)
6221 {
6222 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6223 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6224 unsigned long flags;
6225 int t;
6226
6227 sched_core_lock(cpu, &flags);
6228
6229 WARN_ON_ONCE(rq->core != rq);
6230
6231 /* if we're the first, we'll be our own leader */
6232 if (cpumask_weight(smt_mask) == 1)
6233 goto unlock;
6234
6235 /* find the leader */
6236 for_each_cpu(t, smt_mask) {
6237 if (t == cpu)
6238 continue;
6239 rq = cpu_rq(t);
6240 if (rq->core == rq) {
6241 core_rq = rq;
6242 break;
6243 }
6244 }
6245
6246 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6247 goto unlock;
6248
6249 /* install and validate core_rq */
6250 for_each_cpu(t, smt_mask) {
6251 rq = cpu_rq(t);
6252
6253 if (t == cpu)
6254 rq->core = core_rq;
6255
6256 WARN_ON_ONCE(rq->core != core_rq);
6257 }
6258
6259 unlock:
6260 sched_core_unlock(cpu, &flags);
6261 }
6262
6263 static void sched_core_cpu_deactivate(unsigned int cpu)
6264 {
6265 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6266 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6267 unsigned long flags;
6268 int t;
6269
6270 sched_core_lock(cpu, &flags);
6271
6272 /* if we're the last man standing, nothing to do */
6273 if (cpumask_weight(smt_mask) == 1) {
6274 WARN_ON_ONCE(rq->core != rq);
6275 goto unlock;
6276 }
6277
6278 /* if we're not the leader, nothing to do */
6279 if (rq->core != rq)
6280 goto unlock;
6281
6282 /* find a new leader */
6283 for_each_cpu(t, smt_mask) {
6284 if (t == cpu)
6285 continue;
6286 core_rq = cpu_rq(t);
6287 break;
6288 }
6289
6290 if (WARN_ON_ONCE(!core_rq)) /* impossible */
6291 goto unlock;
6292
6293 /* copy the shared state to the new leader */
6294 core_rq->core_task_seq = rq->core_task_seq;
6295 core_rq->core_pick_seq = rq->core_pick_seq;
6296 core_rq->core_cookie = rq->core_cookie;
6297 core_rq->core_forceidle = rq->core_forceidle;
6298 core_rq->core_forceidle_seq = rq->core_forceidle_seq;
6299
6300 /* install new leader */
6301 for_each_cpu(t, smt_mask) {
6302 rq = cpu_rq(t);
6303 rq->core = core_rq;
6304 }
6305
6306 unlock:
6307 sched_core_unlock(cpu, &flags);
6308 }
6309
6310 static inline void sched_core_cpu_dying(unsigned int cpu)
6311 {
6312 struct rq *rq = cpu_rq(cpu);
6313
6314 if (rq->core != rq)
6315 rq->core = rq;
6316 }
6317
6318 #else /* !CONFIG_SCHED_CORE */
6319
6320 static inline void sched_core_cpu_starting(unsigned int cpu) {}
6321 static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6322 static inline void sched_core_cpu_dying(unsigned int cpu) {}
6323
6324 static struct task_struct *
6325 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6326 {
6327 return __pick_next_task(rq, prev, rf);
6328 }
6329
6330 #endif /* CONFIG_SCHED_CORE */
6331
6332 /*
6333 * Constants for the sched_mode argument of __schedule().
6334 *
6335 * The mode argument allows RT enabled kernels to differentiate a
6336 * preemption from blocking on an 'sleeping' spin/rwlock. Note that
6337 * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
6338 * optimize the AND operation out and just check for zero.
6339 */
6340 #define SM_NONE 0x0
6341 #define SM_PREEMPT 0x1
6342 #define SM_RTLOCK_WAIT 0x2
6343
6344 #ifndef CONFIG_PREEMPT_RT
6345 # define SM_MASK_PREEMPT (~0U)
6346 #else
6347 # define SM_MASK_PREEMPT SM_PREEMPT
6348 #endif
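/*
 * Illustrative: on !PREEMPT_RT, SM_MASK_PREEMPT is ~0U, so
 * "sched_mode & SM_MASK_PREEMPT" reduces to sched_mode itself; on
 * PREEMPT_RT, SM_RTLOCK_WAIT & SM_PREEMPT == 0, so a task blocking on an
 * rtlock takes the non-preempted (voluntary sleep) path in __schedule().
 */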
6349
6350 /*
6351 * __schedule() is the main scheduler function.
6352 *
6353 * The main means of driving the scheduler and thus entering this function are:
6354 *
6355 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6356 *
6357 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6358 * paths. For example, see arch/x86/entry_64.S.
6359 *
6360 * To drive preemption between tasks, the scheduler sets the flag in timer
6361 * interrupt handler scheduler_tick().
6362 *
6363 * 3. Wakeups don't really cause entry into schedule(). They add a
6364 * task to the run-queue and that's it.
6365 *
6366 * Now, if the new task added to the run-queue preempts the current
6367 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6368 * called on the nearest possible occasion:
6369 *
6370 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6371 *
6372 * - in syscall or exception context, at the next outermost
6373 * preempt_enable(). (this might be as soon as the wake_up()'s
6374 * spin_unlock()!)
6375 *
6376 * - in IRQ context, return from interrupt-handler to
6377 * preemptible context
6378 *
6379 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6380 * then at the next:
6381 *
6382 * - cond_resched() call
6383 * - explicit schedule() call
6384 * - return from syscall or exception to user-space
6385 * - return from interrupt-handler to user-space
6386 *
6387 * WARNING: must be called with preemption disabled!
6388 */
6389 static void __sched notrace __schedule(unsigned int sched_mode)
6390 {
6391 struct task_struct *prev, *next;
6392 unsigned long *switch_count;
6393 unsigned long prev_state;
6394 struct rq_flags rf;
6395 struct rq *rq;
6396 int cpu;
6397
6398 cpu = smp_processor_id();
6399 rq = cpu_rq(cpu);
6400 prev = rq->curr;
6401
6402 schedule_debug(prev, !!sched_mode);
6403
6404 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
6405 hrtick_clear(rq);
6406
6407 local_irq_disable();
6408 rcu_note_context_switch(!!sched_mode);
6409
6410 /*
6411 * Make sure that signal_pending_state()->signal_pending() below
6412 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6413 * done by the caller to avoid the race with signal_wake_up():
6414 *
6415 * __set_current_state(@state) signal_wake_up()
6416 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
6417 * wake_up_state(p, state)
6418 * LOCK rq->lock LOCK p->pi_state
6419 * smp_mb__after_spinlock() smp_mb__after_spinlock()
6420 * if (signal_pending_state()) if (p->state & @state)
6421 *
6422 * Also, the membarrier system call requires a full memory barrier
6423 * after coming from user-space, before storing to rq->curr.
6424 */
6425 rq_lock(rq, &rf);
6426 smp_mb__after_spinlock();
6427
6428 /* Promote REQ to ACT */
6429 rq->clock_update_flags <<= 1;
6430 update_rq_clock(rq);
6431
6432 switch_count = &prev->nivcsw;
6433
6434 /*
6435 * We must load prev->state once (task_struct::state is volatile), such
6436 * that:
6437 *
6438 * - we form a control dependency vs deactivate_task() below.
6439 * - ptrace_{,un}freeze_traced() can change ->state underneath us.
6440 */
6441 prev_state = READ_ONCE(prev->__state);
6442 if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
6443 if (signal_pending_state(prev_state, prev)) {
6444 WRITE_ONCE(prev->__state, TASK_RUNNING);
6445 } else {
6446 prev->sched_contributes_to_load =
6447 (prev_state & TASK_UNINTERRUPTIBLE) &&
6448 !(prev_state & TASK_NOLOAD) &&
6449 !(prev->flags & PF_FROZEN);
6450
6451 if (prev->sched_contributes_to_load)
6452 rq->nr_uninterruptible++;
6453
6454 /*
6455 * __schedule() ttwu()
6456 * prev_state = prev->state; if (p->on_rq && ...)
6457 * if (prev_state) goto out;
6458 * p->on_rq = 0; smp_acquire__after_ctrl_dep();
6459 * p->state = TASK_WAKING
6460 *
6461 * Where __schedule() and ttwu() have matching control dependencies.
6462 *
6463 * After this, schedule() must not care about p->state any more.
6464 */
6465 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
6466
6467 if (prev->in_iowait) {
6468 atomic_inc(&rq->nr_iowait);
6469 delayacct_blkio_start();
6470 }
6471 }
6472 switch_count = &prev->nvcsw;
6473 }
6474
6475 next = pick_next_task(rq, prev, &rf);
6476 clear_tsk_need_resched(prev);
6477 clear_preempt_need_resched();
6478 #ifdef CONFIG_SCHED_DEBUG
6479 rq->last_seen_need_resched_ns = 0;
6480 #endif
6481
6482 trace_android_rvh_schedule(prev, next, rq);
6483 if (likely(prev != next)) {
6484 rq->nr_switches++;
6485 /*
6486 * RCU users of rcu_dereference(rq->curr) may not see
6487 * changes to task_struct made by pick_next_task().
6488 */
6489 RCU_INIT_POINTER(rq->curr, next);
6490 /*
6491 * The membarrier system call requires each architecture
6492 * to have a full memory barrier after updating
6493 * rq->curr, before returning to user-space.
6494 *
6495 * Here are the schemes providing that barrier on the
6496 * various architectures:
6497 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
6498 * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
6499 * - finish_lock_switch() for weakly-ordered
6500 * architectures where spin_unlock is a full barrier,
6501 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6502 * is a RELEASE barrier),
6503 */
6504 ++*switch_count;
6505
6506 migrate_disable_switch(rq, prev);
6507 psi_sched_switch(prev, next, !task_on_rq_queued(prev));
6508
6509 trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next);
6510
6511 /* Also unlocks the rq: */
6512 rq = context_switch(rq, prev, next, &rf);
6513 } else {
6514 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
6515
6516 rq_unpin_lock(rq, &rf);
6517 __balance_callbacks(rq);
6518 raw_spin_rq_unlock_irq(rq);
6519 }
6520 }
6521
6522 void __noreturn do_task_dead(void)
6523 {
6524 /* Causes final put_task_struct in finish_task_switch(): */
6525 set_special_state(TASK_DEAD);
6526
6527 /* Tell freezer to ignore us: */
6528 current->flags |= PF_NOFREEZE;
6529
6530 __schedule(SM_NONE);
6531 BUG();
6532
6533 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6534 for (;;)
6535 cpu_relax();
6536 }
6537
6538 static inline void sched_submit_work(struct task_struct *tsk)
6539 {
6540 unsigned int task_flags;
6541
6542 if (task_is_running(tsk))
6543 return;
6544
6545 task_flags = tsk->flags;
6546 /*
6547 * If a worker went to sleep, notify and ask workqueue whether
6548 * it wants to wake up a task to maintain concurrency.
6549 * As this function is called inside the schedule() context,
6550 * we disable preemption to avoid it calling schedule() again
6551 * in the possible wakeup of a kworker and because wq_worker_sleeping()
6552 * requires it.
6553 */
6554 if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
6555 preempt_disable();
6556 if (task_flags & PF_WQ_WORKER)
6557 wq_worker_sleeping(tsk);
6558 else
6559 io_wq_worker_sleeping(tsk);
6560 preempt_enable_no_resched();
6561 }
6562
6563 /*
6564 * spinlock and rwlock must not flush block requests. This will
6565 * deadlock if the callback attempts to acquire a lock which is
6566 * already acquired.
6567 */
6568 SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
6569
6570 /*
6571 * If we are going to sleep and we have plugged IO queued,
6572 * make sure to submit it to avoid deadlocks.
6573 */
6574 if (blk_needs_flush_plug(tsk))
6575 blk_schedule_flush_plug(tsk);
6576 }
6577
6578 static void sched_update_worker(struct task_struct *tsk)
6579 {
6580 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
6581 if (tsk->flags & PF_WQ_WORKER)
6582 wq_worker_running(tsk);
6583 else
6584 io_wq_worker_running(tsk);
6585 }
6586 }
6587
6588 asmlinkage __visible void __sched schedule(void)
6589 {
6590 struct task_struct *tsk = current;
6591
6592 sched_submit_work(tsk);
6593 do {
6594 preempt_disable();
6595 __schedule(SM_NONE);
6596 sched_preempt_enable_no_resched();
6597 } while (need_resched());
6598 sched_update_worker(tsk);
6599 }
6600 EXPORT_SYMBOL(schedule);
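/*
 * Typical caller pattern (illustrative; the real wait-queue helpers in
 * <linux/wait.h> wrap this):
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	if (!condition)
 *		schedule();
 *	__set_current_state(TASK_RUNNING);
 *
 * The state is set before the condition is tested so that a concurrent
 * wakeup cannot be missed; see the barrier comment in __schedule().
 */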
6601
6602 /*
6603 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
6604 * state (have scheduled out non-voluntarily) by making sure that all
6605 * tasks have either left the run queue or have gone into user space.
6606 * As idle tasks do not do either, they must not ever be preempted
6607 * (schedule out non-voluntarily).
6608 *
6609 * schedule_idle() is similar to schedule_preempt_disabled() except that it
6610 * never enables preemption because it does not call sched_submit_work().
6611 */
6612 void __sched schedule_idle(void)
6613 {
6614 /*
6615 * As this skips calling sched_submit_work(), which the idle task does
6616 * regardless because that function is a nop when the task is in a
6617 * TASK_RUNNING state, make sure this isn't used someplace that the
6618 * current task can be in any other state. Note, idle is always in the
6619 * TASK_RUNNING state.
6620 */
6621 WARN_ON_ONCE(current->__state);
6622 do {
6623 __schedule(SM_NONE);
6624 } while (need_resched());
6625 }
6626
6627 #if defined(CONFIG_CONTEXT_TRACKING) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK)
6628 asmlinkage __visible void __sched schedule_user(void)
6629 {
6630 /*
6631 * If we come here after a random call to set_need_resched(),
6632 * or we have been woken up remotely but the IPI has not yet arrived,
6633 * we haven't yet exited the RCU idle mode. Do it here manually until
6634 * we find a better solution.
6635 *
6636 * NB: There are buggy callers of this function. Ideally we
6637 * should warn if prev_state != CONTEXT_USER, but that will trigger
6638 * too frequently to make sense yet.
6639 */
6640 enum ctx_state prev_state = exception_enter();
6641 schedule();
6642 exception_exit(prev_state);
6643 }
6644 #endif
6645
6646 /**
6647 * schedule_preempt_disabled - called with preemption disabled
6648 *
6649 * Returns with preemption disabled. Note: preempt_count must be 1
6650 */
6651 void __sched schedule_preempt_disabled(void)
6652 {
6653 sched_preempt_enable_no_resched();
6654 schedule();
6655 preempt_disable();
6656 }
6657
6658 #ifdef CONFIG_PREEMPT_RT
6659 void __sched notrace schedule_rtlock(void)
6660 {
6661 do {
6662 preempt_disable();
6663 __schedule(SM_RTLOCK_WAIT);
6664 sched_preempt_enable_no_resched();
6665 } while (need_resched());
6666 }
6667 NOKPROBE_SYMBOL(schedule_rtlock);
6668 #endif
6669
6670 static void __sched notrace preempt_schedule_common(void)
6671 {
6672 do {
6673 /*
6674 * Because the function tracer can trace preempt_count_sub()
6675 * and it also uses preempt_enable/disable_notrace(), if
6676 * NEED_RESCHED is set, the preempt_enable_notrace() called
6677 * by the function tracer will call this function again and
6678 * cause infinite recursion.
6679 *
6680 * Preemption must be disabled here before the function
6681 * tracer can trace. Break up preempt_disable() into two
6682 * calls. One to disable preemption without fear of being
6683 * traced. The other to still record the preemption latency,
6684 * which can also be traced by the function tracer.
6685 */
6686 preempt_disable_notrace();
6687 preempt_latency_start(1);
6688 __schedule(SM_PREEMPT);
6689 preempt_latency_stop(1);
6690 preempt_enable_no_resched_notrace();
6691
6692 /*
6693 * Check again in case we missed a preemption opportunity
6694 * between schedule and now.
6695 */
6696 } while (need_resched());
6697 }
6698
6699 #ifdef CONFIG_PREEMPTION
6700 /*
6701 * This is the entry point to schedule() from in-kernel preemption
6702 * off of preempt_enable.
6703 */
6704 asmlinkage __visible void __sched notrace preempt_schedule(void)
6705 {
6706 /*
6707 * If there is a non-zero preempt_count or interrupts are disabled,
6708 * we do not want to preempt the current task. Just return.
6709 */
6710 if (likely(!preemptible()))
6711 return;
6712
6713 preempt_schedule_common();
6714 }
6715 NOKPROBE_SYMBOL(preempt_schedule);
6716 EXPORT_SYMBOL(preempt_schedule);
6717
6718 #ifdef CONFIG_PREEMPT_DYNAMIC
6719 DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func);
6720 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
6721 #endif
6722
6723
6724 /**
6725 * preempt_schedule_notrace - preempt_schedule called by tracing
6726 *
6727 * The tracing infrastructure uses preempt_enable_notrace to prevent
6728 * recursion and tracing preempt enabling caused by the tracing
6729 * infrastructure itself. But as tracing can happen in areas coming
6730 * from userspace or just about to enter userspace, a preempt enable
6731 * can occur before user_exit() is called. This will cause the scheduler
6732 * to be called when the system is still in usermode.
6733 *
6734 * To prevent this, the preempt_enable_notrace will use this function
6735 * instead of preempt_schedule() to exit user context if needed before
6736 * calling the scheduler.
6737 */
6738 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
6739 {
6740 enum ctx_state prev_ctx;
6741
6742 if (likely(!preemptible()))
6743 return;
6744
6745 do {
6746 /*
6747 * Because the function tracer can trace preempt_count_sub()
6748 * and it also uses preempt_enable/disable_notrace(), if
6749 * NEED_RESCHED is set, the preempt_enable_notrace() called
6750 * by the function tracer will call this function again and
6751 * cause infinite recursion.
6752 *
6753 * Preemption must be disabled here before the function
6754 * tracer can trace. Break up preempt_disable() into two
6755 * calls. One to disable preemption without fear of being
6756 * traced. The other to still record the preemption latency,
6757 * which can also be traced by the function tracer.
6758 */
6759 preempt_disable_notrace();
6760 preempt_latency_start(1);
6761 /*
6762 * Needs preempt disabled in case user_exit() is traced
6763 * and the tracer calls preempt_enable_notrace() causing
6764 * an infinite recursion.
6765 */
6766 prev_ctx = exception_enter();
6767 __schedule(SM_PREEMPT);
6768 exception_exit(prev_ctx);
6769
6770 preempt_latency_stop(1);
6771 preempt_enable_no_resched_notrace();
6772 } while (need_resched());
6773 }
6774 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
6775
6776 #ifdef CONFIG_PREEMPT_DYNAMIC
6777 DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
6778 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
6779 #endif
6780
6781 #endif /* CONFIG_PREEMPTION */
6782
6783 #ifdef CONFIG_PREEMPT_DYNAMIC
6784
6785 #include <linux/entry-common.h>
6786
6787 /*
6788 * SC:cond_resched
6789 * SC:might_resched
6790 * SC:preempt_schedule
6791 * SC:preempt_schedule_notrace
6792 * SC:irqentry_exit_cond_resched
6793 *
6794 *
6795 * NONE:
6796 * cond_resched <- __cond_resched
6797 * might_resched <- RET0
6798 * preempt_schedule <- NOP
6799 * preempt_schedule_notrace <- NOP
6800 * irqentry_exit_cond_resched <- NOP
6801 *
6802 * VOLUNTARY:
6803 * cond_resched <- __cond_resched
6804 * might_resched <- __cond_resched
6805 * preempt_schedule <- NOP
6806 * preempt_schedule_notrace <- NOP
6807 * irqentry_exit_cond_resched <- NOP
6808 *
6809 * FULL:
6810 * cond_resched <- RET0
6811 * might_resched <- RET0
6812 * preempt_schedule <- preempt_schedule
6813 * preempt_schedule_notrace <- preempt_schedule_notrace
6814 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
6815 */
6816
6817 enum {
6818 preempt_dynamic_none = 0,
6819 preempt_dynamic_voluntary,
6820 preempt_dynamic_full,
6821 };
6822
6823 int preempt_dynamic_mode = preempt_dynamic_full;
6824
6825 int sched_dynamic_mode(const char *str)
6826 {
6827 if (!strcmp(str, "none"))
6828 return preempt_dynamic_none;
6829
6830 if (!strcmp(str, "voluntary"))
6831 return preempt_dynamic_voluntary;
6832
6833 if (!strcmp(str, "full"))
6834 return preempt_dynamic_full;
6835
6836 return -EINVAL;
6837 }
6838
6839 void sched_dynamic_update(int mode)
6840 {
6841 /*
6842 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
6843 * the ZERO state, which is invalid.
6844 */
6845 static_call_update(cond_resched, __cond_resched);
6846 static_call_update(might_resched, __cond_resched);
6847 static_call_update(preempt_schedule, __preempt_schedule_func);
6848 static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
6849 static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
6850
6851 switch (mode) {
6852 case preempt_dynamic_none:
6853 static_call_update(cond_resched, __cond_resched);
6854 static_call_update(might_resched, (void *)&__static_call_return0);
6855 static_call_update(preempt_schedule, NULL);
6856 static_call_update(preempt_schedule_notrace, NULL);
6857 static_call_update(irqentry_exit_cond_resched, NULL);
6858 pr_info("Dynamic Preempt: none\n");
6859 break;
6860
6861 case preempt_dynamic_voluntary:
6862 static_call_update(cond_resched, __cond_resched);
6863 static_call_update(might_resched, __cond_resched);
6864 static_call_update(preempt_schedule, NULL);
6865 static_call_update(preempt_schedule_notrace, NULL);
6866 static_call_update(irqentry_exit_cond_resched, NULL);
6867 pr_info("Dynamic Preempt: voluntary\n");
6868 break;
6869
6870 case preempt_dynamic_full:
6871 static_call_update(cond_resched, (void *)&__static_call_return0);
6872 static_call_update(might_resched, (void *)&__static_call_return0);
6873 static_call_update(preempt_schedule, __preempt_schedule_func);
6874 static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
6875 static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
6876 pr_info("Dynamic Preempt: full\n");
6877 break;
6878 }
6879
6880 preempt_dynamic_mode = mode;
6881 }
6882
6883 static int __init setup_preempt_mode(char *str)
6884 {
6885 int mode = sched_dynamic_mode(str);
6886 if (mode < 0) {
6887 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
6888 return 0;
6889 }
6890
6891 sched_dynamic_update(mode);
6892 return 1;
6893 }
6894 __setup("preempt=", setup_preempt_mode);
6895
6896 #endif /* CONFIG_PREEMPT_DYNAMIC */
6897
6898 /*
6899 * This is the entry point to schedule() from kernel preemption
6900 * off of irq context.
6901 * Note that this is called and returns with irqs disabled. This will
6902 * protect us against recursive calling from irq.
6903 */
6904 asmlinkage __visible void __sched preempt_schedule_irq(void)
6905 {
6906 enum ctx_state prev_state;
6907
6908 /* Catch callers which need to be fixed */
6909 BUG_ON(preempt_count() || !irqs_disabled());
6910
6911 prev_state = exception_enter();
6912
6913 do {
6914 preempt_disable();
6915 local_irq_enable();
6916 __schedule(SM_PREEMPT);
6917 local_irq_disable();
6918 sched_preempt_enable_no_resched();
6919 } while (need_resched());
6920
6921 exception_exit(prev_state);
6922 }
6923
6924 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
6925 void *key)
6926 {
6927 WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC | WF_ANDROID_VENDOR));
6928 return try_to_wake_up(curr->private, mode, wake_flags);
6929 }
6930 EXPORT_SYMBOL(default_wake_function);
6931
6932 static void __setscheduler_prio(struct task_struct *p, int prio)
6933 {
6934 if (dl_prio(prio))
6935 p->sched_class = &dl_sched_class;
6936 else if (rt_prio(prio))
6937 p->sched_class = &rt_sched_class;
6938 else
6939 p->sched_class = &fair_sched_class;
6940
6941 p->prio = prio;
6942 }
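/*
 * Illustrative mapping: prio < 0 (i.e. -1) selects the deadline class,
 * prio 0..99 the RT class, and prio 100..139 (the nice range) the fair
 * class.
 */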
6943
6944 #ifdef CONFIG_RT_MUTEXES
6945
6946 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
6947 {
6948 if (pi_task)
6949 prio = min(prio, pi_task->prio);
6950
6951 return prio;
6952 }
6953
6954 static inline int rt_effective_prio(struct task_struct *p, int prio)
6955 {
6956 struct task_struct *pi_task = rt_mutex_get_top_task(p);
6957
6958 return __rt_effective_prio(pi_task, prio);
6959 }
6960
6961 /*
6962 * rt_mutex_setprio - set the current priority of a task
6963 * @p: task to boost
6964 * @pi_task: donor task
6965 *
6966 * This function changes the 'effective' priority of a task. It does
6967 * not touch ->normal_prio like __setscheduler().
6968 *
6969 * Used by the rt_mutex code to implement priority inheritance
6970 * logic. Call site only calls if the priority of the task changed.
6971 */
6972 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
6973 {
6974 int prio, oldprio, queued, running, queue_flag =
6975 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
6976 const struct sched_class *prev_class;
6977 struct rq_flags rf;
6978 struct rq *rq;
6979 int update = 0;
6980
6981 trace_android_rvh_rtmutex_prepare_setprio(p, pi_task);
6982 /* XXX used to be waiter->prio, not waiter->task->prio */
6983 prio = __rt_effective_prio(pi_task, p->normal_prio);
6984
6985 trace_android_rvh_rtmutex_force_update(p, pi_task, &update);
6986 /*
6987 * If nothing changed; bail early.
6988 */
6989 if (!update && p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
6990 return;
6991
6992 rq = __task_rq_lock(p, &rf);
6993 update_rq_clock(rq);
6994 /*
6995 * Set under pi_lock && rq->lock, such that the value can be used under
6996 * either lock.
6997 *
6998 * Note that there is plenty of trickiness in making this pointer cache work
6999 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
7000 * ensure a task is de-boosted (pi_task is set to NULL) before the
7001 * task is allowed to run again (and can exit). This ensures the pointer
7002 * points to a blocked task -- which guarantees the task is present.
7003 */
7004 p->pi_top_task = pi_task;
7005
7006 /*
7007 * For FIFO/RR we only need to set prio, if that matches we're done.
7008 */
7009 if (!update && prio == p->prio && !dl_prio(prio))
7010 goto out_unlock;
7011
7012 /*
7013 * Idle task boosting is a no-no in general. There is one
7014 * exception, when PREEMPT_RT and NOHZ is active:
7015 *
7016 * The idle task calls get_next_timer_interrupt() and holds
7017 * the timer wheel base->lock on the CPU and another CPU wants
7018 * to access the timer (probably to cancel it). We can safely
7019 * ignore the boosting request, as the idle CPU runs this code
7020 * with interrupts disabled and will complete the lock
7021 * protected section without being interrupted. So there is no
7022 * real need to boost.
7023 */
7024 if (unlikely(p == rq->idle)) {
7025 WARN_ON(p != rq->curr);
7026 WARN_ON(p->pi_blocked_on);
7027 goto out_unlock;
7028 }
7029
7030 trace_sched_pi_setprio(p, pi_task);
7031 oldprio = p->prio;
7032
7033 if (oldprio == prio)
7034 queue_flag &= ~DEQUEUE_MOVE;
7035
7036 prev_class = p->sched_class;
7037 queued = task_on_rq_queued(p);
7038 running = task_current(rq, p);
7039 if (queued)
7040 dequeue_task(rq, p, queue_flag);
7041 if (running)
7042 put_prev_task(rq, p);
7043
7044 /*
7045 * Boosting conditions are:
7046 * 1. -rt task is running and holds mutex A
7047 * --> -dl task blocks on mutex A
7048 *
7049 * 2. -dl task is running and holds mutex A
7050 * --> -dl task blocks on mutex A and could preempt the
7051 * running task
7052 */
7053 if (dl_prio(prio)) {
7054 if (!dl_prio(p->normal_prio) ||
7055 (pi_task && dl_prio(pi_task->prio) &&
7056 dl_entity_preempt(&pi_task->dl, &p->dl))) {
7057 p->dl.pi_se = pi_task->dl.pi_se;
7058 queue_flag |= ENQUEUE_REPLENISH;
7059 } else {
7060 p->dl.pi_se = &p->dl;
7061 }
7062 } else if (rt_prio(prio)) {
7063 if (dl_prio(oldprio))
7064 p->dl.pi_se = &p->dl;
7065 if (oldprio < prio)
7066 queue_flag |= ENQUEUE_HEAD;
7067 } else {
7068 if (dl_prio(oldprio))
7069 p->dl.pi_se = &p->dl;
7070 if (rt_prio(oldprio))
7071 p->rt.timeout = 0;
7072 }
7073
7074 __setscheduler_prio(p, prio);
7075
7076 if (queued)
7077 enqueue_task(rq, p, queue_flag);
7078 if (running)
7079 set_next_task(rq, p);
7080
7081 check_class_changed(rq, p, prev_class, oldprio);
7082 out_unlock:
7083 /* Avoid rq from going away on us: */
7084 preempt_disable();
7085
7086 rq_unpin_lock(rq, &rf);
7087 __balance_callbacks(rq);
7088 raw_spin_rq_unlock(rq);
7089
7090 preempt_enable();
7091 }
7092 #else
7093 static inline int rt_effective_prio(struct task_struct *p, int prio)
7094 {
7095 return prio;
7096 }
7097 #endif
7098
7099 void set_user_nice(struct task_struct *p, long nice)
7100 {
7101 bool queued, running, allowed = false;
7102 int old_prio;
7103 struct rq_flags rf;
7104 struct rq *rq;
7105
7106 trace_android_rvh_set_user_nice(p, &nice, &allowed);
7107 if ((task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) && !allowed)
7108 return;
7109 /*
7110 * We have to be careful, if called from sys_setpriority(),
7111 * the task might be in the middle of scheduling on another CPU.
7112 */
7113 rq = task_rq_lock(p, &rf);
7114 update_rq_clock(rq);
7115
7116 /*
7117 * The RT priorities are set via sched_setscheduler(), but we still
7118 * allow the 'normal' nice value to be set - but as expected
7119 * it won't have any effect on scheduling until the task is
7120 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
7121 */
7122 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
7123 p->static_prio = NICE_TO_PRIO(nice);
7124 goto out_unlock;
7125 }
7126 queued = task_on_rq_queued(p);
7127 running = task_current(rq, p);
7128 if (queued)
7129 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
7130 if (running)
7131 put_prev_task(rq, p);
7132
7133 p->static_prio = NICE_TO_PRIO(nice);
7134 set_load_weight(p, true);
7135 old_prio = p->prio;
7136 p->prio = effective_prio(p);
7137
7138 if (queued)
7139 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
7140 if (running)
7141 set_next_task(rq, p);
7142
7143 /*
7144 * If the task increased its priority or is running and
7145 * lowered its priority, then reschedule its CPU:
7146 */
7147 p->sched_class->prio_changed(rq, p, old_prio);
7148
7149 out_unlock:
7150 task_rq_unlock(rq, p, &rf);
7151 }
7152 EXPORT_SYMBOL(set_user_nice);
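/*
 * Illustrative: NICE_TO_PRIO() maps nice [-20, 19] onto static_prio
 * [100, 139], with nice 0 == 120; set_load_weight() then converts the
 * static priority into the task's CFS load weight.
 */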
7153
7154 /*
7155 * can_nice - check if a task can reduce its nice value
7156 * @p: task
7157 * @nice: nice value
7158 */
7159 int can_nice(const struct task_struct *p, const int nice)
7160 {
7161 /* Convert nice value [19,-20] to rlimit style value [1,40]: */
7162 int nice_rlim = nice_to_rlimit(nice);
7163
7164 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
7165 capable(CAP_SYS_NICE));
7166 }
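/*
 * Illustrative: nice_to_rlimit() maps nice 19..-20 onto 1..40, so a task
 * with RLIMIT_NICE == 30 may lower its nice value down to -10 without
 * CAP_SYS_NICE.
 */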
7167
7168 #ifdef __ARCH_WANT_SYS_NICE
7169
7170 /*
7171 * sys_nice - change the priority of the current process.
7172 * @increment: priority increment
7173 *
7174 * sys_setpriority is a more generic, but much slower function that
7175 * does similar things.
7176 */
7177 SYSCALL_DEFINE1(nice, int, increment)
7178 {
7179 long nice, retval;
7180
7181 /*
7182 * Setpriority might change our priority at the same moment.
7183 * We don't have to worry. Conceptually one call occurs first
7184 * and we have a single winner.
7185 */
7186 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
7187 nice = task_nice(current) + increment;
7188
7189 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
7190 if (increment < 0 && !can_nice(current, nice))
7191 return -EPERM;
7192
7193 retval = security_task_setnice(current, nice);
7194 if (retval)
7195 return retval;
7196
7197 set_user_nice(current, nice);
7198 return 0;
7199 }
7200
7201 #endif
7202
7203 /**
7204 * task_prio - return the priority value of a given task.
7205 * @p: the task in question.
7206 *
7207 * Return: The priority value as seen by users in /proc.
7208 *
7209 * sched policy return value kernel prio user prio/nice
7210 *
7211 * normal, batch, idle [0 ... 39] [100 ... 139] 0/[-20 ... 19]
7212 * fifo, rr [-2 ... -100] [98 ... 0] [1 ... 99]
7213 * deadline -101 -1 0
7214 */
7215 int task_prio(const struct task_struct *p)
7216 {
7217 return p->prio - MAX_RT_PRIO;
7218 }
7219
7220 /**
7221 * idle_cpu - is a given CPU idle currently?
7222 * @cpu: the processor in question.
7223 *
7224 * Return: 1 if the CPU is currently idle. 0 otherwise.
7225 */
7226 int idle_cpu(int cpu)
7227 {
7228 struct rq *rq = cpu_rq(cpu);
7229
7230 if (rq->curr != rq->idle)
7231 return 0;
7232
7233 if (rq->nr_running)
7234 return 0;
7235
7236 #ifdef CONFIG_SMP
7237 if (rq->ttwu_pending)
7238 return 0;
7239 #endif
7240
7241 return 1;
7242 }
7243
7244 /**
7245 * available_idle_cpu - is a given CPU idle for enqueuing work.
7246 * @cpu: the CPU in question.
7247 *
7248 * Return: 1 if the CPU is currently idle. 0 otherwise.
7249 */
available_idle_cpu(int cpu)7250 int available_idle_cpu(int cpu)
7251 {
7252 if (!idle_cpu(cpu))
7253 return 0;
7254
7255 if (vcpu_is_preempted(cpu))
7256 return 0;
7257
7258 return 1;
7259 }
7260 EXPORT_SYMBOL_GPL(available_idle_cpu);
7261
7262 /**
7263 * idle_task - return the idle task for a given CPU.
7264 * @cpu: the processor in question.
7265 *
7266 * Return: The idle task for the CPU @cpu.
7267 */
idle_task(int cpu)7268 struct task_struct *idle_task(int cpu)
7269 {
7270 return cpu_rq(cpu)->idle;
7271 }
7272
7273 #ifdef CONFIG_SMP
7274 /*
7275 * This function computes an effective utilization for the given CPU, to be
7276 * used for frequency selection given the linear relation: f = u * f_max.
7277 *
7278 * The scheduler tracks the following metrics:
7279 *
7280 * cpu_util_{cfs,rt,dl,irq}()
7281 * cpu_bw_dl()
7282 *
7283 * Where the cfs,rt and dl util numbers are tracked with the same metric and
7284 * synchronized windows and are thus directly comparable.
7285 *
7286 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
7287 * which excludes things like IRQ and steal-time. These latter are then accrued
7288 * in the irq utilization.
7289 *
7290 * The DL bandwidth number otoh is not a measured metric but a value computed
7291 * based on the task model parameters and gives the minimal utilization
7292 * required to meet deadlines.
7293 */
effective_cpu_util(int cpu,unsigned long util_cfs,unsigned long max,enum cpu_util_type type,struct task_struct * p)7294 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
7295 unsigned long max, enum cpu_util_type type,
7296 struct task_struct *p)
7297 {
7298 unsigned long dl_util, util, irq;
7299 struct rq *rq = cpu_rq(cpu);
7300 unsigned long new_util = ULONG_MAX;
7301
7302 trace_android_rvh_effective_cpu_util(cpu, util_cfs, max, type, p, &new_util);
7303 if (new_util != ULONG_MAX)
7304 return new_util;
7305
7306 if (!uclamp_is_used() &&
7307 type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
7308 return max;
7309 }
7310
7311 /*
7312 * Early check to see if IRQ/steal time saturates the CPU, can be
7313 * because of inaccuracies in how we track these -- see
7314 * update_irq_load_avg().
7315 */
7316 irq = cpu_util_irq(rq);
7317 if (unlikely(irq >= max))
7318 return max;
7319
7320 /*
7321 * Because the time spend on RT/DL tasks is visible as 'lost' time to
7322 * CFS tasks and we use the same metric to track the effective
7323 * utilization (PELT windows are synchronized) we can directly add them
7324 * to obtain the CPU's actual utilization.
7325 *
7326 * CFS and RT utilization can be boosted or capped, depending on
7327 * utilization clamp constraints requested by currently RUNNABLE
7328 * tasks.
7329 * When there are no CFS RUNNABLE tasks, clamps are released and
7330 * frequency will be gracefully reduced with the utilization decay.
7331 */
7332 util = util_cfs + cpu_util_rt(rq);
7333 if (type == FREQUENCY_UTIL)
7334 util = uclamp_rq_util_with(rq, util, p);
7335
7336 dl_util = cpu_util_dl(rq);
7337
7338 /*
7339 * For frequency selection we do not make cpu_util_dl() a permanent part
7340 * of this sum because we want to use cpu_bw_dl() later on, but we need
7341 * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
7342 * that we select f_max when there is no idle time.
7343 *
7344 * NOTE: numerical errors or stop class might cause us to not quite hit
7345 * saturation when we should -- something for later.
7346 */
7347 if (util + dl_util >= max)
7348 return max;
7349
7350 /*
7351 * OTOH, for energy computation we need the estimated running time, so
7352 * include util_dl and ignore dl_bw.
7353 */
7354 if (type == ENERGY_UTIL)
7355 util += dl_util;
7356
7357 /*
7358 * There is still idle time; further improve the number by using the
7359 * irq metric. Because IRQ/steal time is hidden from the task clock we
7360 * need to scale the task numbers:
7361 *
7362 * max - irq
7363 * U' = irq + --------- * U
7364 * max
7365 */
7366 util = scale_irq_capacity(util, irq, max);
7367 util += irq;
7368
7369 /*
7370 * Bandwidth required by DEADLINE must always be granted while, for
7371 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
7372 * to gracefully reduce the frequency when no tasks show up for longer
7373 * periods of time.
7374 *
7375 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
7376 * bw_dl as requested freq. However, cpufreq is not yet ready for such
7377 * an interface. So, we only do the latter for now.
7378 */
7379 if (type == FREQUENCY_UTIL)
7380 util += cpu_bw_dl(rq);
7381
7382 return min(max, util);
7383 }
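
/*
 * Worked example for the IRQ scaling step above (illustrative numbers only):
 * with max = 1024, irq = 256 and a CFS+RT sum U = 512,
 *
 *   U' = 256 + (1024 - 256) / 1024 * 512 = 256 + 384 = 640
 *
 * i.e. the task-clock utilization is compressed into the 768 capacity units
 * the IRQ time leaves over, and the IRQ time itself is then added back.
 */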

unsigned long sched_cpu_util(int cpu, unsigned long max)
{
	return effective_cpu_util(cpu, cpu_util_cfs(cpu_rq(cpu)), max,
				  ENERGY_UTIL, NULL);
}
#endif /* CONFIG_SMP */

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 *
 * The task of @pid, if found. %NULL otherwise.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

/*
 * sched_setparam() passes in -1 for its policy, to let the functions
 * it calls know not to change it.
 */
#define SETPARAM_POLICY	-1

static void __setscheduler_params(struct task_struct *p,
		const struct sched_attr *attr)
{
	int policy = attr->sched_policy;

	if (policy == SETPARAM_POLICY)
		policy = p->policy;

	p->policy = policy;

	if (dl_policy(policy))
		__setparam_dl(p, attr);
	else if (fair_policy(policy))
		p->static_prio = NICE_TO_PRIO(attr->sched_nice);

	/*
	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
	 * !rt_policy. Always setting this ensures that things like
	 * getparam()/getattr() don't report silly values for !rt tasks.
	 */
	p->rt_priority = attr->sched_priority;
	p->normal_prio = normal_prio(p);
	set_load_weight(p, true);
}

/*
 * Check the target process has a UID that matches the current process's:
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	bool match;

	rcu_read_lock();
	pcred = __task_cred(p);
	match = (uid_eq(cred->euid, pcred->euid) ||
		 uid_eq(cred->euid, pcred->uid));
	rcu_read_unlock();
	return match;
}

static int __sched_setscheduler(struct task_struct *p,
				const struct sched_attr *attr,
				bool user, bool pi)
{
	int oldpolicy = -1, policy = attr->sched_policy;
	int retval, oldprio, newprio, queued, running;
	const struct sched_class *prev_class;
	struct callback_head *head;
	struct rq_flags rf;
	int reset_on_fork;
	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	struct rq *rq;
	bool cpuset_locked = false;

	/* The pi code expects interrupts enabled */
	BUG_ON(pi && in_interrupt());
recheck:
	/* Double check policy once rq lock held: */
	if (policy < 0) {
		reset_on_fork = p->sched_reset_on_fork;
		policy = oldpolicy = p->policy;
	} else {
		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);

		if (!valid_policy(policy))
			return -EINVAL;
	}

	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
		return -EINVAL;

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
	 * SCHED_BATCH and SCHED_IDLE is 0.
	 */
	if (attr->sched_priority > MAX_RT_PRIO-1)
		return -EINVAL;
	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
	    (rt_policy(policy) != (attr->sched_priority != 0)))
		return -EINVAL;

	/*
	 * Allow unprivileged RT tasks to decrease priority:
	 */
	if (user && !capable(CAP_SYS_NICE)) {
		if (fair_policy(policy)) {
			if (attr->sched_nice < task_nice(p) &&
			    !can_nice(p, attr->sched_nice))
				return -EPERM;
		}

		if (rt_policy(policy)) {
			unsigned long rlim_rtprio =
					task_rlimit(p, RLIMIT_RTPRIO);

			/* Can't set/change the rt policy: */
			if (policy != p->policy && !rlim_rtprio)
				return -EPERM;

			/* Can't increase priority: */
			if (attr->sched_priority > p->rt_priority &&
			    attr->sched_priority > rlim_rtprio)
				return -EPERM;
		}

		/*
		 * Can't set/change SCHED_DEADLINE policy at all for now
		 * (safest behavior); in the future we would like to allow
		 * unprivileged DL tasks to increase their relative deadline
		 * or reduce their runtime (both ways reducing utilization)
		 */
		if (dl_policy(policy))
			return -EPERM;

		/*
		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
		 */
		if (task_has_idle_policy(p) && !idle_policy(policy)) {
			if (!can_nice(p, task_nice(p)))
				return -EPERM;
		}

		/* Can't change other user's priorities: */
		if (!check_same_owner(p))
			return -EPERM;

		/* Normal users shall not reset the sched_reset_on_fork flag: */
		if (p->sched_reset_on_fork && !reset_on_fork)
			return -EPERM;

		/* Can't change util-clamps */
		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
			return -EPERM;
	}

	if (user) {
		if (attr->sched_flags & SCHED_FLAG_SUGOV)
			return -EINVAL;

		retval = security_task_setscheduler(p);
		if (retval)
			return retval;
	}

	/* Update task specific "requested" clamps */
	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
		retval = uclamp_validate(p, attr);
		if (retval)
			return retval;
	}

	/*
	 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
	 * information.
	 */
	if (dl_policy(policy) || dl_policy(p->policy)) {
		cpuset_locked = true;
		cpuset_lock();
	}

	/*
	 * Make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
	 *
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	/*
	 * Changing the policy of the stop threads is a very bad idea:
	 */
	if (p == rq->stop) {
		retval = -EINVAL;
		goto unlock;
	}

	/*
	 * If not changing anything there's no need to proceed further,
	 * but store a possible modification of reset_on_fork.
	 */
	if (unlikely(policy == p->policy)) {
		if (fair_policy(policy) && attr->sched_nice != task_nice(p))
			goto change;
		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
			goto change;
		if (dl_policy(policy) && dl_param_changed(p, attr))
			goto change;
		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
			goto change;

		p->sched_reset_on_fork = reset_on_fork;
		retval = 0;
		goto unlock;
	}
change:

	if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * Do not allow realtime tasks into groups that have no runtime
		 * assigned.
		 */
		if (rt_bandwidth_enabled() && rt_policy(policy) &&
				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
				!task_group_is_autogroup(task_group(p))) {
			retval = -EPERM;
			goto unlock;
		}
#endif
#ifdef CONFIG_SMP
		if (dl_bandwidth_enabled() && dl_policy(policy) &&
				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
			cpumask_t *span = rq->rd->span;

			/*
			 * Don't allow tasks with an affinity mask smaller than
			 * the entire root_domain to become SCHED_DEADLINE. We
			 * will also fail if there's no bandwidth available.
			 */
			if (!cpumask_subset(span, p->cpus_ptr) ||
			    rq->rd->dl_bw.bw == 0) {
				retval = -EPERM;
				goto unlock;
			}
		}
#endif
	}

	/* Re-check policy now with rq lock held: */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		policy = oldpolicy = -1;
		task_rq_unlock(rq, p, &rf);
		if (cpuset_locked)
			cpuset_unlock();
		goto recheck;
	}

	/*
	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
	 * is available.
	 */
	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
		retval = -EBUSY;
		goto unlock;
	}

	p->sched_reset_on_fork = reset_on_fork;
	oldprio = p->prio;

	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
	if (pi) {
		/*
		 * Take priority boosted tasks into account. If the new
		 * effective priority is unchanged, we just store the new
		 * normal parameters and do not touch the scheduler class and
		 * the runqueue. This will be done when the task deboosts
		 * itself.
		 */
		newprio = rt_effective_prio(p, newprio);
		if (newprio == oldprio)
			queue_flags &= ~DEQUEUE_MOVE;
	}

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, queue_flags);
	if (running)
		put_prev_task(rq, p);

	prev_class = p->sched_class;

	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
		__setscheduler_params(p, attr);
		__setscheduler_prio(p, newprio);
	}
	__setscheduler_uclamp(p, attr);

	if (queued) {
		/*
		 * We enqueue to tail when the priority of a task is
		 * increased (user space view).
		 */
		if (oldprio < p->prio)
			queue_flags |= ENQUEUE_HEAD;

		enqueue_task(rq, p, queue_flags);
	}
	if (running)
		set_next_task(rq, p);

	check_class_changed(rq, p, prev_class, oldprio);

	/* Avoid rq from going away on us: */
	preempt_disable();
	head = splice_balance_callbacks(rq);
	task_rq_unlock(rq, p, &rf);

	if (pi) {
		if (cpuset_locked)
			cpuset_unlock();
		rt_mutex_adjust_pi(p);
	}

	/* Run balance callbacks after we've adjusted the PI chain: */
	balance_callbacks(rq, head);
	preempt_enable();

	return 0;

unlock:
	task_rq_unlock(rq, p, &rf);
	if (cpuset_locked)
		cpuset_unlock();
	return retval;
}

static int _sched_setscheduler(struct task_struct *p, int policy,
			       const struct sched_param *param, bool check)
{
	struct sched_attr attr = {
		.sched_policy   = policy,
		.sched_priority = param->sched_priority,
		.sched_nice     = PRIO_TO_NICE(p->static_prio),
	};

	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
		policy &= ~SCHED_RESET_ON_FORK;
		attr.sched_policy = policy;
	}

	return __sched_setscheduler(p, &attr, check, true);
}
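
/*
 * Illustration (not part of the kernel source): the legacy hack above lets
 * callers OR the reset flag into the policy word itself, e.g.
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *
 *	sched_setscheduler(p, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp);
 *
 * which is unpacked here into attr.sched_flags before the real policy
 * (SCHED_FIFO) is passed on to __sched_setscheduler().
 */
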
/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Use sched_set_fifo(), read its comment.
 *
 * Return: 0 on success. An error code otherwise.
 *
 * NOTE that the task may be already dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
		       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, true);
}
EXPORT_SYMBOL_GPL(sched_setscheduler);

int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, true, true);
}
EXPORT_SYMBOL_GPL(sched_setattr);

int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, false, true);
}
EXPORT_SYMBOL_GPL(sched_setattr_nocheck);

/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 *
 * Return: 0 on success. An error code otherwise.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
			       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, false);
}
EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);

/*
 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
 * incapable of resource management, which is the one thing an OS really should
 * be doing.
 *
 * This is of course the reason it is limited to privileged users only.
 *
 * Worse still; it is fundamentally impossible to compose static priority
 * workloads. You cannot take two correctly working static prio workloads
 * and smash them together and still expect them to work.
 *
 * For this reason 'all' FIFO tasks the kernel creates are basically at:
 *
 *   MAX_RT_PRIO / 2
 *
 * The administrator _MUST_ configure the system, the kernel simply doesn't
 * know enough information to make a sensible choice.
 */
void sched_set_fifo(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo);
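
/*
 * Typical in-kernel usage sketch (illustrative; "my_thread_fn" and the
 * kthread are hypothetical, not from this file):
 *
 *	struct task_struct *tsk = kthread_create(my_thread_fn, NULL, "myd");
 *
 *	if (!IS_ERR(tsk)) {
 *		sched_set_fifo(tsk);	// FIFO at MAX_RT_PRIO / 2, i.e. 50
 *		wake_up_process(tsk);
 *	}
 */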

/*
 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
 */
void sched_set_fifo_low(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = 1 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo_low);

void sched_set_normal(struct task_struct *p, int nice)
{
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
		.sched_nice = nice,
	};
	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_normal);

static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
	struct sched_param lparam;
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;
	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
		return -EFAULT;

	rcu_read_lock();
	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (p != NULL)
		retval = sched_setscheduler(p, policy, &lparam);
	rcu_read_unlock();

	return retval;
}

/*
 * Mimics kernel/events/core.c perf_copy_attr().
 */
static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
{
	u32 size;
	int ret;

	/* Zero the full structure, so that a short copy will be nice: */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	/* ABI compatibility quirk: */
	if (!size)
		size = SCHED_ATTR_SIZE_VER0;
	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
		goto err_size;

	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
	if (ret) {
		if (ret == -E2BIG)
			goto err_size;
		return ret;
	}

	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
	    size < SCHED_ATTR_SIZE_VER1)
		return -EINVAL;

	/*
	 * XXX: Do we want to be lenient like existing syscalls; or do we want
	 * to be strict and return an error on out-of-bounds values?
	 */
	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);

	return 0;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	return -E2BIG;
}
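
/*
 * ABI example (illustrative; the exact constants live in
 * include/uapi/linux/sched/types.h): a VER0 userspace passes size == 48,
 * which covers everything up to sched_period; a VER1 caller passes 56 and
 * gains the sched_util_{min,max} clamp fields. A caller passing size == 0
 * is treated as VER0, and anything outside [SCHED_ATTR_SIZE_VER0, PAGE_SIZE]
 * gets -E2BIG with the kernel's own size written back for inspection.
 */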

static void get_params(struct task_struct *p, struct sched_attr *attr)
{
	if (task_has_dl_policy(p))
		__getparam_dl(p, attr);
	else if (task_has_rt_policy(p))
		attr->sched_priority = p->rt_priority;
	else
		attr->sched_nice = task_nice(p);
}

/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
{
	if (policy < 0)
		return -EINVAL;

	return do_sched_setscheduler(pid, policy, param);
}

/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
}

/**
 * sys_sched_setattr - same as above, but with extended sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @flags: for future extension.
 */
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
			       unsigned int, flags)
{
	struct sched_attr attr;
	struct task_struct *p;
	int retval;

	if (!uattr || pid < 0 || flags)
		return -EINVAL;

	retval = sched_copy_attr(uattr, &attr);
	if (retval)
		return retval;

	if ((int)attr.sched_policy < 0)
		return -EINVAL;
	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
		attr.sched_policy = SETPARAM_POLICY;

	rcu_read_lock();
	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (likely(p))
		get_task_struct(p);
	rcu_read_unlock();

	if (likely(p)) {
		if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
			get_params(p, &attr);
		retval = sched_setattr(p, &attr);
		put_task_struct(p);
	}

	return retval;
}

/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 *
 * Return: On success, the policy of the thread. Otherwise, a negative error
 * code.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
	struct task_struct *p;
	int retval;

	if (pid < 0)
		return -EINVAL;

	retval = -ESRCH;
	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (p) {
		retval = security_task_getscheduler(p);
		if (!retval)
			retval = p->policy
				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
	}
	rcu_read_unlock();
	return retval;
}

/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 *
 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
 * code.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
	struct sched_param lp = { .sched_priority = 0 };
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	if (task_has_rt_policy(p))
		lp.sched_priority = p->rt_priority;
	rcu_read_unlock();

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

	return retval;

out_unlock:
	rcu_read_unlock();
	return retval;
}

/*
 * Copy the kernel size attribute structure (which might be larger
 * than what user-space knows about) to user-space.
 *
 * Note that all cases are valid: user-space buffer can be larger or
 * smaller than the kernel-space buffer. The usual case is that both
 * have the same size.
 */
static int
sched_attr_copy_to_user(struct sched_attr __user *uattr,
			struct sched_attr *kattr,
			unsigned int usize)
{
	unsigned int ksize = sizeof(*kattr);

	if (!access_ok(uattr, usize))
		return -EFAULT;

	/*
	 * sched_getattr() ABI forwards and backwards compatibility:
	 *
	 * If usize == ksize then we just copy everything to user-space and all is good.
	 *
	 * If usize < ksize then we only copy as much as user-space has space for,
	 * this keeps ABI compatibility as well. We skip the rest.
	 *
	 * If usize > ksize then user-space is using a newer version of the ABI,
	 * parts of which the kernel doesn't know about. Just ignore it - tooling can
	 * detect the kernel's knowledge of attributes from the attr->size value
	 * which is set to ksize in this case.
	 */
	kattr->size = min(usize, ksize);

	if (copy_to_user(uattr, kattr, kattr->size))
		return -EFAULT;

	return 0;
}

/**
 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @usize: sizeof(attr) for fwd/bwd comp.
 * @flags: for future extension.
 */
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
		unsigned int, usize, unsigned int, flags)
{
	struct sched_attr kattr = { };
	struct task_struct *p;
	int retval;

	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
	    usize < SCHED_ATTR_SIZE_VER0 || flags)
		return -EINVAL;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	kattr.sched_policy = p->policy;
	if (p->sched_reset_on_fork)
		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
	get_params(p, &kattr);
	kattr.sched_flags &= SCHED_FLAG_ALL;

#ifdef CONFIG_UCLAMP_TASK
	/*
	 * This could race with another potential updater, but this is fine
	 * because it'll correctly read the old or the new value. We don't need
	 * to guarantee who wins the race as long as it doesn't return garbage.
	 */
	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif

	rcu_read_unlock();

	return sched_attr_copy_to_user(uattr, &kattr, usize);

out_unlock:
	rcu_read_unlock();
	return retval;
}
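
/*
 * Userspace usage sketch (illustrative; glibc has no wrapper, so callers
 * typically go through syscall(2)):
 *
 *	struct sched_attr attr;
 *
 *	if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0) == 0)
 *		printf("policy=%u nice=%d\n", attr.sched_policy,
 *		       attr.sched_nice);
 *
 * A binary built against an older, smaller sched_attr keeps working: the
 * kernel only copies out as much as the caller asked for.
 */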

#ifdef CONFIG_SMP
int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
{
	int ret = 0;

	/*
	 * If the task isn't a deadline task or admission control is
	 * disabled then we don't care about affinity changes.
	 */
	if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
		return 0;

	/*
	 * Since bandwidth control happens on root_domain basis,
	 * if admission test is enabled, we only admit -deadline
	 * tasks allowed to run on all the CPUs in the task's
	 * root_domain.
	 */
	rcu_read_lock();
	if (!cpumask_subset(task_rq(p)->rd->span, mask))
		ret = -EBUSY;
	rcu_read_unlock();
	return ret;
}
#endif

static int
__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
{
	int retval;
	cpumask_var_t cpus_allowed, new_mask;

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
		return -ENOMEM;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}

	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, mask, cpus_allowed);

	retval = dl_task_check_affinity(p, new_mask);
	if (retval)
		goto out_free_new_mask;
again:
	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
	if (retval)
		goto out_free_new_mask;

	cpuset_cpus_allowed(p, cpus_allowed);
	if (!cpumask_subset(new_mask, cpus_allowed)) {
		/*
		 * We must have raced with a concurrent cpuset update.
		 * Just reset the cpumask to the cpuset's cpus_allowed.
		 */
		cpumask_copy(new_mask, cpus_allowed);
		goto again;
	}

out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
	return retval;
}

long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	struct task_struct *p;
	int retval = 0;
	int skip = 0;

	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (p->flags & PF_NO_SETAFFINITY) {
		retval = -EINVAL;
		goto out_put_task;
	}

	if (!check_same_owner(p)) {
		rcu_read_lock();
		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
			rcu_read_unlock();
			retval = -EPERM;
			goto out_put_task;
		}
		rcu_read_unlock();
	}

	trace_android_vh_sched_setaffinity_early(p, in_mask, &skip);
	if (skip)
		goto out_put_task;
	retval = security_task_setscheduler(p);
	if (retval)
		goto out_put_task;

	retval = __sched_setaffinity(p, in_mask);
	trace_android_rvh_sched_setaffinity(p, in_mask, &retval);

out_put_task:
	put_task_struct(p);
	return retval;
}

static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     struct cpumask *new_mask)
{
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}

/**
 * sys_sched_setaffinity - set the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new CPU mask
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}

long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	unsigned long flags;
	int retval;

	rcu_read_lock();

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
	trace_android_rvh_sched_getaffinity(p, mask);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

out_unlock:
	rcu_read_unlock();

	return retval;
}

/**
 * sys_sched_getaffinity - get the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current CPU mask
 *
 * Return: size of CPU mask copied to user_mask_ptr on success. An
 * error code otherwise.
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		unsigned int retlen = min(len, cpumask_size());

		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}
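
/*
 * Illustration (not part of the kernel source): unlike the glibc wrapper,
 * which normalizes the result to 0, the raw syscall returns the number of
 * bytes copied, so @len must be a multiple of sizeof(unsigned long) and
 * large enough to hold nr_cpu_ids bits:
 *
 *	cpu_set_t set;
 *	long n = syscall(SYS_sched_getaffinity, 0, sizeof(set), &set);
 *	// n > 0: bytes written; n == -1: inspect errno (e.g. EINVAL)
 */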

static void do_sched_yield(void)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = this_rq_lock_irq(&rf);

	schedstat_inc(rq->yld_count);
	current->sched_class->yield_task(rq);

	trace_android_rvh_do_sched_yield(rq);

	preempt_disable();
	rq_unlock_irq(rq, &rf);
	sched_preempt_enable_no_resched();

	schedule();
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 *
 * Return: 0.
 */
SYSCALL_DEFINE0(sched_yield)
{
	do_sched_yield();
	return 0;
}

#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
int __sched __cond_resched(void)
{
	if (should_resched(0)) {
		preempt_schedule_common();
		return 1;
	}
	/*
	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
	 * whether the current CPU is in an RCU read-side critical section,
	 * so the tick can report quiescent states even for CPUs looping
	 * in kernel context. In contrast, in non-preemptible kernels,
	 * RCU readers leave no in-memory hints, which means that CPU-bound
	 * processes executing in kernel context might never report an
	 * RCU quiescent state. Therefore, the following code causes
	 * cond_resched() to report a quiescent state, but only when RCU
	 * is in urgent need of one.
	 */
#ifndef CONFIG_PREEMPT_RCU
	rcu_all_qs();
#endif
	return 0;
}
EXPORT_SYMBOL(__cond_resched);
#endif

#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
EXPORT_STATIC_CALL_TRAMP(cond_resched);

DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
EXPORT_STATIC_CALL_TRAMP(might_resched);
#endif

/*
 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
 * call schedule, and on return reacquire the lock.
 *
 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 */
int __cond_resched_lock(spinlock_t *lock)
{
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	lockdep_assert_held(lock);

	if (spin_needbreak(lock) || resched) {
		spin_unlock(lock);
		if (!_cond_resched())
			cpu_relax();
		ret = 1;
		spin_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_lock);
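
/*
 * Usage sketch (illustrative; "big_list", "item" and "list_lock" are
 * hypothetical): long traversals under a spinlock can use the
 * cond_resched_lock() wrapper to bound both scheduling latency and lock
 * hold time:
 *
 *	spin_lock(&list_lock);
 *	list_for_each_entry(item, &big_list, node) {
 *		process(item);
 *		if (cond_resched_lock(&list_lock)) {
 *			// lock was dropped and retaken; any state derived
 *			// from the list before this point may now be stale
 *		}
 *	}
 *	spin_unlock(&list_lock);
 */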

int __cond_resched_rwlock_read(rwlock_t *lock)
{
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	lockdep_assert_held_read(lock);

	if (rwlock_needbreak(lock) || resched) {
		read_unlock(lock);
		if (!_cond_resched())
			cpu_relax();
		ret = 1;
		read_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_rwlock_read);

int __cond_resched_rwlock_write(rwlock_t *lock)
{
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	lockdep_assert_held_write(lock);

	if (rwlock_needbreak(lock) || resched) {
		write_unlock(lock);
		if (!_cond_resched())
			cpu_relax();
		ret = 1;
		write_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_rwlock_write);

/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run, if removing the yield() call from your code breaks
 * it, it's already broken.
 *
 * Typical broken usage is:
 *
 *	while (!event)
 *		yield();
 *
 * where one assumes that yield() will let 'the other' process run that will
 * make event true. If the current task is a SCHED_FIFO task that will never
 * happen. Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	do_sched_yield();
}
EXPORT_SYMBOL(yield);

/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Return:
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
 */
int __sched yield_to(struct task_struct *p, bool preempt)
{
	struct task_struct *curr = current;
	struct rq *rq, *p_rq;
	unsigned long flags;
	int yielded = 0;

	local_irq_save(flags);
	rq = this_rq();

again:
	p_rq = task_rq(p);
	/*
	 * If we're the only runnable task on the rq and target rq also
	 * has only one task, there's absolutely no point in yielding.
	 */
	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
		yielded = -ESRCH;
		goto out_irq;
	}

	double_rq_lock(rq, p_rq);
	if (task_rq(p) != p_rq) {
		double_rq_unlock(rq, p_rq);
		goto again;
	}

	if (!curr->sched_class->yield_to_task)
		goto out_unlock;

	if (curr->sched_class != p->sched_class)
		goto out_unlock;

	if (task_running(p_rq, p) || !task_is_running(p))
		goto out_unlock;

	yielded = curr->sched_class->yield_to_task(rq, p);
	if (yielded) {
		schedstat_inc(rq->yld_count);
		/*
		 * Make p's CPU reschedule; pick_next_entity takes care of
		 * fairness.
		 */
		if (preempt && rq != p_rq)
			resched_curr(p_rq);
	}

out_unlock:
	double_rq_unlock(rq, p_rq);
out_irq:
	local_irq_restore(flags);

	if (yielded > 0)
		schedule();

	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);

int io_schedule_prepare(void)
{
	int old_iowait = current->in_iowait;

	current->in_iowait = 1;
	blk_schedule_flush_plug(current);

	return old_iowait;
}

void io_schedule_finish(int token)
{
	current->in_iowait = token;
}

/*
 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
 * that process accounting knows that this is a task in IO wait state.
 */
long __sched io_schedule_timeout(long timeout)
{
	int token;
	long ret;

	token = io_schedule_prepare();
	ret = schedule_timeout(timeout);
	io_schedule_finish(token);

	return ret;
}
EXPORT_SYMBOL(io_schedule_timeout);
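
/*
 * Usage sketch (illustrative; "my_wq" and handle_timeout() are hypothetical):
 *
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *	if (!io_schedule_timeout(HZ))		// sleep up to one second
 *		handle_timeout();
 *	finish_wait(&my_wq, &wait);
 *
 * The sleep is accounted as iowait. Most callers reach this through helpers
 * such as wait_for_completion_io_timeout() rather than open-coding the wait.
 */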

void __sched io_schedule(void)
{
	int token;

	token = io_schedule_prepare();
	schedule();
	io_schedule_finish(token);
}
EXPORT_SYMBOL(io_schedule);

/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the maximum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_RT_PRIO-1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		ret = 0;
		break;
	}
	return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the minimum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		ret = 0;
	}
	return ret;
}
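
/*
 * Concrete values (illustrative; MAX_RT_PRIO is 100 in current kernels):
 * sched_get_priority_max(SCHED_FIFO) returns 99 and the matching min is 1,
 * while all non-RT policies report 0 for both, since they don't use
 * rt_priority at all. Portable userspace should query these syscalls
 * rather than hard-coding the 1..99 range.
 */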
8698
sched_rr_get_interval(pid_t pid,struct timespec64 * t)8699 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
8700 {
8701 struct task_struct *p;
8702 unsigned int time_slice;
8703 struct rq_flags rf;
8704 struct rq *rq;
8705 int retval;
8706
8707 if (pid < 0)
8708 return -EINVAL;
8709
8710 retval = -ESRCH;
8711 rcu_read_lock();
8712 p = find_process_by_pid(pid);
8713 if (!p)
8714 goto out_unlock;
8715
8716 retval = security_task_getscheduler(p);
8717 if (retval)
8718 goto out_unlock;
8719
8720 rq = task_rq_lock(p, &rf);
8721 time_slice = 0;
8722 if (p->sched_class->get_rr_interval)
8723 time_slice = p->sched_class->get_rr_interval(rq, p);
8724 task_rq_unlock(rq, p, &rf);
8725
8726 rcu_read_unlock();
8727 jiffies_to_timespec64(time_slice, t);
8728 return 0;
8729
8730 out_unlock:
8731 rcu_read_unlock();
8732 return retval;
8733 }
8734
8735 /**
8736 * sys_sched_rr_get_interval - return the default timeslice of a process.
8737 * @pid: pid of the process.
8738 * @interval: userspace pointer to the timeslice value.
8739 *
8740 * this syscall writes the default timeslice value of a given process
8741 * into the user-space timespec buffer. A value of '0' means infinity.
8742 *
8743 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
8744 * an error code.
8745 */
SYSCALL_DEFINE2(sched_rr_get_interval,pid_t,pid,struct __kernel_timespec __user *,interval)8746 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
8747 struct __kernel_timespec __user *, interval)
8748 {
8749 struct timespec64 t;
8750 int retval = sched_rr_get_interval(pid, &t);
8751
8752 if (retval == 0)
8753 retval = put_timespec64(&t, interval);
8754
8755 return retval;
8756 }
8757
8758 #ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE2(sched_rr_get_interval_time32,pid_t,pid,struct old_timespec32 __user *,interval)8759 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
8760 struct old_timespec32 __user *, interval)
8761 {
8762 struct timespec64 t;
8763 int retval = sched_rr_get_interval(pid, &t);
8764
8765 if (retval == 0)
8766 retval = put_old_timespec32(&t, interval);
8767 return retval;
8768 }
8769 #endif
8770
sched_show_task(struct task_struct * p)8771 void sched_show_task(struct task_struct *p)
8772 {
8773 unsigned long free = 0;
8774 int ppid;
8775
8776 if (!try_get_task_stack(p))
8777 return;
8778
8779 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
8780
8781 if (task_is_running(p))
8782 pr_cont(" running task ");
8783 #ifdef CONFIG_DEBUG_STACK_USAGE
8784 free = stack_not_used(p);
8785 #endif
8786 ppid = 0;
8787 rcu_read_lock();
8788 if (pid_alive(p))
8789 ppid = task_pid_nr(rcu_dereference(p->real_parent));
8790 rcu_read_unlock();
8791 pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n",
8792 free, task_pid_nr(p), ppid,
8793 (unsigned long)task_thread_info(p)->flags);
8794
8795 print_worker_info(KERN_INFO, p);
8796 print_stop_info(KERN_INFO, p);
8797 trace_android_vh_sched_show_task(p);
8798 show_stack(p, NULL, KERN_INFO);
8799 put_task_stack(p);
8800 }
8801 EXPORT_SYMBOL_GPL(sched_show_task);
8802
8803 static inline bool
state_filter_match(unsigned long state_filter,struct task_struct * p)8804 state_filter_match(unsigned long state_filter, struct task_struct *p)
8805 {
8806 unsigned int state = READ_ONCE(p->__state);
8807
8808 /* no filter, everything matches */
8809 if (!state_filter)
8810 return true;
8811
8812 /* filter, but doesn't match */
8813 if (!(state & state_filter))
8814 return false;
8815
8816 /*
8817 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
8818 * TASK_KILLABLE).
8819 */
8820 if (state_filter == TASK_UNINTERRUPTIBLE && state == TASK_IDLE)
8821 return false;
8822
8823 return true;
8824 }
8825
8826
show_state_filter(unsigned int state_filter)8827 void show_state_filter(unsigned int state_filter)
8828 {
8829 struct task_struct *g, *p;
8830
8831 rcu_read_lock();
8832 for_each_process_thread(g, p) {
8833 /*
8834 * reset the NMI-timeout, listing all files on a slow
8835 * console might take a lot of time:
8836 * Also, reset softlockup watchdogs on all CPUs, because
8837 * another CPU might be blocked waiting for us to process
8838 * an IPI.
8839 */
8840 touch_nmi_watchdog();
8841 touch_all_softlockup_watchdogs();
8842 if (state_filter_match(state_filter, p))
8843 sched_show_task(p);
8844 }
8845
8846 #ifdef CONFIG_SCHED_DEBUG
8847 if (!state_filter)
8848 sysrq_sched_debug_show();
8849 #endif
8850 rcu_read_unlock();
8851 /*
8852 * Only show locks if all tasks are dumped:
8853 */
8854 if (!state_filter)
8855 debug_show_all_locks();
8856 }
8857
8858 /**
8859 * init_idle - set up an idle thread for a given CPU
8860 * @idle: task in question
8861 * @cpu: CPU the idle task belongs to
8862 *
8863 * NOTE: this function does not set the idle thread's NEED_RESCHED
8864 * flag, to make booting more robust.
8865 */
init_idle(struct task_struct * idle,int cpu)8866 void __init init_idle(struct task_struct *idle, int cpu)
8867 {
8868 struct rq *rq = cpu_rq(cpu);
8869 unsigned long flags;
8870
8871 __sched_fork(0, idle);
8872
8873 /*
8874 * The idle task doesn't need the kthread struct to function, but it
8875 * is dressed up as a per-CPU kthread and thus needs to play the part
8876 * if we want to avoid special-casing it in code that deals with per-CPU
8877 * kthreads.
8878 */
8879 set_kthread_struct(idle);
8880
8881 raw_spin_lock_irqsave(&idle->pi_lock, flags);
8882 raw_spin_rq_lock(rq);
8883
8884 idle->__state = TASK_RUNNING;
8885 idle->se.exec_start = sched_clock();
8886 /*
8887 * PF_KTHREAD should already be set at this point; regardless, make it
8888 * look like a proper per-CPU kthread.
8889 */
8890 idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
8891 kthread_set_per_cpu(idle, cpu);
8892
8893 #ifdef CONFIG_SMP
8894 /*
8895 * It's possible that init_idle() gets called multiple times on a task,
8896 * in that case do_set_cpus_allowed() will not do the right thing.
8897 *
8898 * And since this is boot we can forgo the serialization.
8899 */
8900 set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
8901 #endif
8902 /*
8903 * We're having a chicken and egg problem, even though we are
8904 * holding rq->lock, the CPU isn't yet set to this CPU so the
8905 * lockdep check in task_group() will fail.
8906 *
8907 * Similar case to sched_fork(). / Alternatively we could
8908 * use task_rq_lock() here and obtain the other rq->lock.
8909 *
8910 * Silence PROVE_RCU
8911 */
8912 rcu_read_lock();
8913 __set_task_cpu(idle, cpu);
8914 rcu_read_unlock();
8915
8916 rq->idle = idle;
8917 rcu_assign_pointer(rq->curr, idle);
8918 idle->on_rq = TASK_ON_RQ_QUEUED;
8919 #ifdef CONFIG_SMP
8920 idle->on_cpu = 1;
8921 #endif
8922 raw_spin_rq_unlock(rq);
8923 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
8924
8925 /* Set the preempt count _outside_ the spinlocks! */
8926 init_idle_preempt_count(idle, cpu);
8927
8928 /*
8929 * The idle tasks have their own, simple scheduling class:
8930 */
8931 idle->sched_class = &idle_sched_class;
8932 ftrace_graph_init_idle_task(idle, cpu);
8933 vtime_init_idle(idle, cpu);
8934 #ifdef CONFIG_SMP
8935 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
8936 #endif
8937 }
8938
8939 #ifdef CONFIG_SMP
8940
cpuset_cpumask_can_shrink(const struct cpumask * cur,const struct cpumask * trial)8941 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
8942 const struct cpumask *trial)
8943 {
8944 int ret = 1;
8945
8946 if (!cpumask_weight(cur))
8947 return ret;
8948
8949 ret = dl_cpuset_cpumask_can_shrink(cur, trial);
8950
8951 return ret;
8952 }
8953
task_can_attach(struct task_struct * p)8954 int task_can_attach(struct task_struct *p)
8955 {
8956 int ret = 0;
8957
8958 /*
8959 * Kthreads which disallow setaffinity shouldn't be moved
8960 * to a new cpuset; we don't want to change their CPU
8961 * affinity and isolating such threads by their set of
8962 * allowed nodes is unnecessary. Thus, cpusets are not
8963 * applicable for such threads. This prevents checking for
8964 * success of set_cpus_allowed_ptr() on all attached tasks
8965 * before cpus_mask may be changed.
8966 */
8967 if (p->flags & PF_NO_SETAFFINITY)
8968 ret = -EINVAL;
8969
8970 return ret;
8971 }
8972
8973 bool sched_smp_initialized __read_mostly;
8974
8975 #ifdef CONFIG_NUMA_BALANCING
8976 /* Migrate current task p to target_cpu */
migrate_task_to(struct task_struct * p,int target_cpu)8977 int migrate_task_to(struct task_struct *p, int target_cpu)
8978 {
8979 struct migration_arg arg = { p, target_cpu };
8980 int curr_cpu = task_cpu(p);
8981
8982 if (curr_cpu == target_cpu)
8983 return 0;
8984
8985 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
8986 return -EINVAL;
8987
8988 /* TODO: This is not properly updating schedstats */
8989
8990 trace_sched_move_numa(p, curr_cpu, target_cpu);
8991 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
8992 }
8993
8994 /*
8995 * Requeue a task on a given node and accurately track the number of NUMA
8996 * tasks on the runqueues
8997 */
sched_setnuma(struct task_struct * p,int nid)8998 void sched_setnuma(struct task_struct *p, int nid)
8999 {
9000 bool queued, running;
9001 struct rq_flags rf;
9002 struct rq *rq;
9003
9004 rq = task_rq_lock(p, &rf);
9005 queued = task_on_rq_queued(p);
9006 running = task_current(rq, p);
9007
9008 if (queued)
9009 dequeue_task(rq, p, DEQUEUE_SAVE);
9010 if (running)
9011 put_prev_task(rq, p);
9012
9013 p->numa_preferred_nid = nid;
9014
9015 if (queued)
9016 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
9017 if (running)
9018 set_next_task(rq, p);
9019 task_rq_unlock(rq, p, &rf);
9020 }
9021 #endif /* CONFIG_NUMA_BALANCING */
9022
9023 #ifdef CONFIG_HOTPLUG_CPU
9024 /*
9025 * Ensure that the idle task is using init_mm right before its CPU goes
9026 * offline.
9027 */
idle_task_exit(void)9028 void idle_task_exit(void)
9029 {
9030 struct mm_struct *mm = current->active_mm;
9031
9032 BUG_ON(cpu_online(smp_processor_id()));
9033 BUG_ON(current != this_rq()->idle);
9034
9035 if (mm != &init_mm) {
9036 switch_mm(mm, &init_mm, current);
9037 finish_arch_post_lock_switch();
9038 }
9039
9040 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
9041 }
9042
pick_migrate_task(struct rq * rq)9043 struct task_struct *pick_migrate_task(struct rq *rq)
9044 {
9045 const struct sched_class *class;
9046 struct task_struct *next;
9047
9048 for_each_class(class) {
9049 next = class->pick_next_task(rq);
9050 if (next) {
9051 next->sched_class->put_prev_task(rq, next);
9052 return next;
9053 }
9054 }
9055
9056 /* The idle class should always have a runnable task */
9057 BUG();
9058 }
9059 EXPORT_SYMBOL_GPL(pick_migrate_task);
9060
__balance_push_cpu_stop(void * arg)9061 static int __balance_push_cpu_stop(void *arg)
9062 {
9063 struct task_struct *p = arg;
9064 struct rq *rq = this_rq();
9065 struct rq_flags rf;
9066 int cpu;
9067
9068 raw_spin_lock_irq(&p->pi_lock);
9069 rq_lock(rq, &rf);
9070
9071 update_rq_clock(rq);
9072
9073 if (task_rq(p) == rq && task_on_rq_queued(p)) {
9074 cpu = select_fallback_rq(rq->cpu, p);
9075 rq = __migrate_task(rq, &rf, p, cpu);
9076 }
9077
9078 rq_unlock(rq, &rf);
9079 raw_spin_unlock_irq(&p->pi_lock);
9080
9081 put_task_struct(p);
9082
9083 return 0;
9084 }
9085
9086 static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
9087
9088 /*
9089 * Ensure we only run per-cpu kthreads once the CPU goes !active.
9090 *
9091 * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
9092 * effective when the hotplug motion is down.
9093 */
balance_push(struct rq * rq)9094 static void balance_push(struct rq *rq)
9095 {
9096 struct task_struct *push_task = rq->curr;
9097
9098 lockdep_assert_rq_held(rq);
9099
9100 /*
9101 * Ensure the thing is persistent until balance_push_set(.on = false);
9102 */
9103 rq->balance_callback = &balance_push_callback;
9104
9105 /*
9106 * Only active while going offline and when invoked on the outgoing
9107 * CPU.
9108 */
9109 if (!cpu_dying(rq->cpu) || rq != this_rq())
9110 return;
9111
9112 /*
9113 * Both the cpu-hotplug and stop task are in this case and are
9114 * required to complete the hotplug process.
9115 */
9116 if (kthread_is_per_cpu(push_task) ||
9117 is_migration_disabled(push_task)) {
9118
9119 /*
9120 * If this is the idle task on the outgoing CPU try to wake
9121 * up the hotplug control thread which might wait for the
9122 * last task to vanish. The rcuwait_active() check is
9123 * accurate here because the waiter is pinned on this CPU
9124 * and can't obviously be running in parallel.
9125 *
9126 * On RT kernels this also has to check whether there are
9127 * pinned and scheduled out tasks on the runqueue. They
9128 * need to leave the migrate disabled section first.
9129 */
9130 if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
9131 rcuwait_active(&rq->hotplug_wait)) {
9132 raw_spin_rq_unlock(rq);
9133 rcuwait_wake_up(&rq->hotplug_wait);
9134 raw_spin_rq_lock(rq);
9135 }
9136 return;
9137 }
9138
9139 get_task_struct(push_task);
9140 /*
9141 * Temporarily drop rq->lock such that we can wake-up the stop task.
9142 * Both preemption and IRQs are still disabled.
9143 */
9144 preempt_disable();
9145 raw_spin_rq_unlock(rq);
9146 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
9147 this_cpu_ptr(&push_work));
9148 preempt_enable();
9149 /*
9150 * At this point need_resched() is true and we'll take the loop in
9151 * schedule(). The next pick is obviously going to be the stop task, which
9152 * is a per-CPU kthread (kthread_is_per_cpu()) and will push this task away.
9153 */
9154 raw_spin_rq_lock(rq);
9155 }
9156
9157 static void balance_push_set(int cpu, bool on)
9158 {
9159 struct rq *rq = cpu_rq(cpu);
9160 struct rq_flags rf;
9161
9162 rq_lock_irqsave(rq, &rf);
9163 if (on) {
9164 WARN_ON_ONCE(rq->balance_callback);
9165 rq->balance_callback = &balance_push_callback;
9166 } else if (rq->balance_callback == &balance_push_callback) {
9167 rq->balance_callback = NULL;
9168 }
9169 rq_unlock_irqrestore(rq, &rf);
9170 }
9171
9172 /*
9173 * Invoked from a CPU's hotplug control thread after the CPU has been marked
9174 * inactive. All tasks which are not per CPU kernel threads are either
9175 * pushed off this CPU now via balance_push() or placed on a different CPU
9176 * during wakeup. Wait until the CPU is quiescent.
9177 */
9178 static void balance_hotplug_wait(void)
9179 {
9180 struct rq *rq = this_rq();
9181
9182 rcuwait_wait_event(&rq->hotplug_wait,
9183 rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
9184 TASK_UNINTERRUPTIBLE);
9185 }
9186
9187 #else
9188
9189 static inline void balance_push(struct rq *rq)
9190 {
9191 }
9192
9193 static inline void balance_push_set(int cpu, bool on)
9194 {
9195 }
9196
9197 static inline void balance_hotplug_wait(void)
9198 {
9199 }
9200
9201 #endif /* CONFIG_HOTPLUG_CPU */
9202
9203 void set_rq_online(struct rq *rq)
9204 {
9205 if (!rq->online) {
9206 const struct sched_class *class;
9207
9208 cpumask_set_cpu(rq->cpu, rq->rd->online);
9209 rq->online = 1;
9210
9211 for_each_class(class) {
9212 if (class->rq_online)
9213 class->rq_online(rq);
9214 }
9215 }
9216 }
9217
9218 void set_rq_offline(struct rq *rq)
9219 {
9220 if (rq->online) {
9221 const struct sched_class *class;
9222
9223 for_each_class(class) {
9224 if (class->rq_offline)
9225 class->rq_offline(rq);
9226 }
9227
9228 cpumask_clear_cpu(rq->cpu, rq->rd->online);
9229 rq->online = 0;
9230 }
9231 }
9232
9233 /*
9234 * used to mark begin/end of suspend/resume:
9235 */
9236 static int num_cpus_frozen;
9237
9238 /*
9239 * Update cpusets according to cpu_active mask. If cpusets are
9240 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
9241 * around partition_sched_domains().
9242 *
9243 * If we come here as part of a suspend/resume, don't touch cpusets because we
9244 * want to restore it back to its original state upon resume anyway.
9245 */
9246 static void cpuset_cpu_active(void)
9247 {
9248 if (cpuhp_tasks_frozen) {
9249 /*
9250 * num_cpus_frozen tracks how many CPUs are involved in suspend
9251 * resume sequence. As long as this is not the last online
9252 * operation in the resume sequence, just build a single sched
9253 * domain, ignoring cpusets.
9254 */
9255 partition_sched_domains(1, NULL, NULL);
9256 if (--num_cpus_frozen)
9257 return;
9258 /*
9259 * This is the last CPU online operation. So fall through and
9260 * restore the original sched domains by considering the
9261 * cpuset configurations.
9262 */
9263 cpuset_force_rebuild();
9264 }
9265 cpuset_update_active_cpus();
9266 }
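/*
 * Illustrative example (hypothetical numbers): suspending a 4-CPU system
 * takes CPUs 1-3 offline with cpuhp_tasks_frozen set, so
 * cpuset_cpu_inactive() below raises num_cpus_frozen to 3. During resume
 * each online operation decrements it again; only the final one
 * (num_cpus_frozen back to 0) falls through to
 * cpuset_update_active_cpus() and rebuilds the cpuset-aware domains.
 */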
9267
9268 static int cpuset_cpu_inactive(unsigned int cpu)
9269 {
9270 if (!cpuhp_tasks_frozen) {
9271 int ret = dl_bw_check_overflow(cpu);
9272
9273 if (ret)
9274 return ret;
9275 cpuset_update_active_cpus();
9276 } else {
9277 num_cpus_frozen++;
9278 partition_sched_domains(1, NULL, NULL);
9279 }
9280 return 0;
9281 }
9282
9283 int sched_cpu_activate(unsigned int cpu)
9284 {
9285 struct rq *rq = cpu_rq(cpu);
9286 struct rq_flags rf;
9287
9288 /*
9289 * Clear the balance_push callback and prepare to schedule
9290 * regular tasks.
9291 */
9292 balance_push_set(cpu, false);
9293
9294 #ifdef CONFIG_SCHED_SMT
9295 /*
9296 * When going up, increment the number of cores with SMT present.
9297 */
9298 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
9299 static_branch_inc_cpuslocked(&sched_smt_present);
9300 #endif
9301 set_cpu_active(cpu, true);
9302
9303 if (sched_smp_initialized) {
9304 sched_domains_numa_masks_set(cpu);
9305 cpuset_cpu_active();
9306 }
9307
9308 /*
9309 * Put the rq online, if not already. This happens:
9310 *
9311 * 1) In the early boot process, because we build the real domains
9312 * after all CPUs have been brought up.
9313 *
9314 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
9315 * domains.
9316 */
9317 rq_lock_irqsave(rq, &rf);
9318 if (rq->rd) {
9319 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
9320 set_rq_online(rq);
9321 }
9322 rq_unlock_irqrestore(rq, &rf);
9323
9324 return 0;
9325 }
9326
9327 int sched_cpu_deactivate(unsigned int cpu)
9328 {
9329 struct rq *rq = cpu_rq(cpu);
9330 struct rq_flags rf;
9331 int ret;
9332
9333 /*
9334 * Remove CPU from nohz.idle_cpus_mask to prevent it from participating
9335 * in load balancing while it is not active.
9336 */
9337 nohz_balance_exit_idle(rq);
9338
9339 set_cpu_active(cpu, false);
9340
9341 /*
9342 * From this point forward, this CPU will refuse to run any task that
9343 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
9344 * push those tasks away until this gets cleared, see
9345 * sched_cpu_dying().
9346 */
9347 balance_push_set(cpu, true);
9348
9349 /*
9350 * We've cleared cpu_active_mask / set balance_push, wait for all
9351 * preempt-disabled and RCU users of this state to go away such that
9352 * all new such users will observe it.
9353 *
9354 * Specifically, we rely on ttwu to no longer target this CPU, see
9355 * ttwu_queue_cond() and is_cpu_allowed().
9356 *
9357 * Synchronize before parking the smpboot threads to take care of the RCU boost case.
9358 */
9359 synchronize_rcu();
9360
9361 rq_lock_irqsave(rq, &rf);
9362 if (rq->rd) {
9363 update_rq_clock(rq);
9364 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
9365 set_rq_offline(rq);
9366 }
9367 rq_unlock_irqrestore(rq, &rf);
9368
9369 #ifdef CONFIG_SCHED_SMT
9370 /*
9371 * When going down, decrement the number of cores with SMT present.
9372 */
9373 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
9374 static_branch_dec_cpuslocked(&sched_smt_present);
9375
9376 sched_core_cpu_deactivate(cpu);
9377 #endif
9378
9379 if (!sched_smp_initialized)
9380 return 0;
9381
9382 ret = cpuset_cpu_inactive(cpu);
9383 if (ret) {
9384 balance_push_set(cpu, false);
9385 set_cpu_active(cpu, true);
9386 return ret;
9387 }
9388 sched_domains_numa_masks_clear(cpu);
9389 return 0;
9390 }
9391
9392 static void sched_rq_cpu_starting(unsigned int cpu)
9393 {
9394 struct rq *rq = cpu_rq(cpu);
9395
9396 rq->calc_load_update = calc_load_update;
9397 update_max_interval();
9398 }
9399
9400 int sched_cpu_starting(unsigned int cpu)
9401 {
9402 sched_core_cpu_starting(cpu);
9403 sched_rq_cpu_starting(cpu);
9404 sched_tick_start(cpu);
9405 trace_android_rvh_sched_cpu_starting(cpu);
9406 return 0;
9407 }
9408
9409 #ifdef CONFIG_HOTPLUG_CPU
9410
9411 /*
9412 * Invoked immediately before the stopper thread is invoked to bring the
9413 * CPU down completely. At this point all per CPU kthreads except the
9414 * hotplug thread (current) and the stopper thread (inactive) have been
9415 * either parked or have been unbound from the outgoing CPU. Ensure that
9416 * any of those which might be on the way out are gone.
9417 *
9418 * If after this point a bound task is being woken on this CPU then the
9419 * responsible hotplug callback has failed to do its job.
9420 * sched_cpu_dying() will catch it with the appropriate fireworks.
9421 */
9422 int sched_cpu_wait_empty(unsigned int cpu)
9423 {
9424 balance_hotplug_wait();
9425 return 0;
9426 }
9427
9428 /*
9429 * Since this CPU is going 'away' for a while, fold any nr_active delta we
9430 * might have. Called from the CPU stopper task after ensuring that the
9431 * stopper is the last running task on the CPU, so nr_active count is
9432 * stable. We need to take the teardown thread which is calling this into
9433 * account, so we hand in adjust = 1 to the load calculation.
9434 *
9435 * Also see the comment "Global load-average calculations".
9436 */
9437 static void calc_load_migrate(struct rq *rq)
9438 {
9439 long delta = calc_load_fold_active(rq, 1);
9440
9441 if (delta)
9442 atomic_long_add(delta, &calc_load_tasks);
9443 }
9444
9445 static void dump_rq_tasks(struct rq *rq, const char *loglvl)
9446 {
9447 struct task_struct *g, *p;
9448 int cpu = cpu_of(rq);
9449
9450 lockdep_assert_rq_held(rq);
9451
9452 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
9453 for_each_process_thread(g, p) {
9454 if (task_cpu(p) != cpu)
9455 continue;
9456
9457 if (!task_on_rq_queued(p))
9458 continue;
9459
9460 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
9461 }
9462 }
9463
9464 int sched_cpu_dying(unsigned int cpu)
9465 {
9466 struct rq *rq = cpu_rq(cpu);
9467 struct rq_flags rf;
9468
9469 /* Handle pending wakeups and then migrate everything off */
9470 sched_tick_stop(cpu);
9471
9472 rq_lock_irqsave(rq, &rf);
9473 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
9474 WARN(true, "Dying CPU not properly vacated!");
9475 dump_rq_tasks(rq, KERN_WARNING);
9476 }
9477 rq_unlock_irqrestore(rq, &rf);
9478
9479 trace_android_rvh_sched_cpu_dying(cpu);
9480
9481 calc_load_migrate(rq);
9482 update_max_interval();
9483 hrtick_clear(rq);
9484 sched_core_cpu_dying(cpu);
9485 return 0;
9486 }
9487 #endif
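/*
 * Illustrative note: on the down path these callbacks run in the order
 * sched_cpu_deactivate() (clear cpu_active, arm balance_push),
 * sched_cpu_wait_empty() (wait for everything but pinned kthreads to
 * leave), then sched_cpu_dying() (verify only the stopper remains and
 * fold the load numbers).
 */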
9488
9489 void __init sched_init_smp(void)
9490 {
9491 sched_init_numa();
9492
9493 /*
9494 * There's no userspace yet to cause hotplug operations; hence all the
9495 * CPU masks are stable and all blatant races in the below code cannot
9496 * happen.
9497 */
9498 mutex_lock(&sched_domains_mutex);
9499 sched_init_domains(cpu_active_mask);
9500 mutex_unlock(&sched_domains_mutex);
9501
9502 /* Move init over to a non-isolated CPU */
9503 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
9504 BUG();
9505 current->flags &= ~PF_NO_SETAFFINITY;
9506 sched_init_granularity();
9507
9508 init_sched_rt_class();
9509 init_sched_dl_class();
9510
9511 sched_smp_initialized = true;
9512 }
9513
9514 static int __init migration_init(void)
9515 {
9516 sched_cpu_starting(smp_processor_id());
9517 return 0;
9518 }
9519 early_initcall(migration_init);
9520
9521 #else
9522 void __init sched_init_smp(void)
9523 {
9524 sched_init_granularity();
9525 }
9526 #endif /* CONFIG_SMP */
9527
9528 int in_sched_functions(unsigned long addr)
9529 {
9530 return in_lock_functions(addr) ||
9531 (addr >= (unsigned long)__sched_text_start
9532 && addr < (unsigned long)__sched_text_end);
9533 }
9534
9535 #ifdef CONFIG_CGROUP_SCHED
9536 /*
9537 * Default task group.
9538 * Every task in the system belongs to this group at bootup.
9539 */
9540 struct task_group root_task_group;
9541 EXPORT_SYMBOL_GPL(root_task_group);
9542 LIST_HEAD(task_groups);
9543 EXPORT_SYMBOL_GPL(task_groups);
9544
9545 /* Cacheline aligned slab cache for task_group */
9546 static struct kmem_cache *task_group_cache __read_mostly;
9547 #endif
9548
9549 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
9550 DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
9551
9552 void __init sched_init(void)
9553 {
9554 unsigned long ptr = 0;
9555 int i;
9556
9557 /* Make sure the linker didn't screw up */
9558 BUG_ON(&idle_sched_class + 1 != &fair_sched_class ||
9559 &fair_sched_class + 1 != &rt_sched_class ||
9560 &rt_sched_class + 1 != &dl_sched_class);
9561 #ifdef CONFIG_SMP
9562 BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
9563 #endif
9564
9565 wait_bit_init();
9566
9567 #ifdef CONFIG_FAIR_GROUP_SCHED
9568 ptr += 2 * nr_cpu_ids * sizeof(void **);
9569 #endif
9570 #ifdef CONFIG_RT_GROUP_SCHED
9571 ptr += 2 * nr_cpu_ids * sizeof(void **);
9572 #endif
9573 if (ptr) {
9574 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
9575
9576 #ifdef CONFIG_FAIR_GROUP_SCHED
9577 root_task_group.se = (struct sched_entity **)ptr;
9578 ptr += nr_cpu_ids * sizeof(void **);
9579
9580 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
9581 ptr += nr_cpu_ids * sizeof(void **);
9582
9583 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
9584 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
9585 #endif /* CONFIG_FAIR_GROUP_SCHED */
9586 #ifdef CONFIG_RT_GROUP_SCHED
9587 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
9588 ptr += nr_cpu_ids * sizeof(void **);
9589
9590 root_task_group.rt_rq = (struct rt_rq **)ptr;
9591 ptr += nr_cpu_ids * sizeof(void **);
9592
9593 #endif /* CONFIG_RT_GROUP_SCHED */
9594 }
9595 #ifdef CONFIG_CPUMASK_OFFSTACK
9596 for_each_possible_cpu(i) {
9597 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
9598 cpumask_size(), GFP_KERNEL, cpu_to_node(i));
9599 per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(
9600 cpumask_size(), GFP_KERNEL, cpu_to_node(i));
9601 }
9602 #endif /* CONFIG_CPUMASK_OFFSTACK */
9603
9604 init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
9605 init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime());
9606
9607 #ifdef CONFIG_SMP
9608 init_defrootdomain();
9609 #endif
9610
9611 #ifdef CONFIG_RT_GROUP_SCHED
9612 init_rt_bandwidth(&root_task_group.rt_bandwidth,
9613 global_rt_period(), global_rt_runtime());
9614 #endif /* CONFIG_RT_GROUP_SCHED */
9615
9616 #ifdef CONFIG_CGROUP_SCHED
9617 task_group_cache = KMEM_CACHE(task_group, 0);
9618
9619 list_add(&root_task_group.list, &task_groups);
9620 INIT_LIST_HEAD(&root_task_group.children);
9621 INIT_LIST_HEAD(&root_task_group.siblings);
9622 autogroup_init(&init_task);
9623 #endif /* CONFIG_CGROUP_SCHED */
9624
9625 for_each_possible_cpu(i) {
9626 struct rq *rq;
9627
9628 rq = cpu_rq(i);
9629 raw_spin_lock_init(&rq->__lock);
9630 rq->nr_running = 0;
9631 rq->calc_load_active = 0;
9632 rq->calc_load_update = jiffies + LOAD_FREQ;
9633 init_cfs_rq(&rq->cfs);
9634 init_rt_rq(&rq->rt);
9635 init_dl_rq(&rq->dl);
9636 #ifdef CONFIG_FAIR_GROUP_SCHED
9637 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
9638 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
9639 /*
9640 * How much CPU bandwidth does root_task_group get?
9641 *
9642 * In the case of task-groups formed through the cgroup filesystem, it
9643 * gets 100% of the CPU resources in the system. This overall
9644 * system CPU resource is divided among the tasks of
9645 * root_task_group and its child task-groups in a fair manner,
9646 * based on each entity's (task or task-group's) weight
9647 * (se->load.weight).
9648 *
9649 * In other words, if root_task_group has 10 tasks (of weight
9650 * 1024) and two child groups A0 and A1 (of weight 1024 each),
9651 * then A0's share of the CPU resource is:
9652 *
9653 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
9654 *
9655 * We achieve this by letting root_task_group's tasks sit
9656 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
9657 */
9658 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
9659 #endif /* CONFIG_FAIR_GROUP_SCHED */
9660
9661 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
9662 #ifdef CONFIG_RT_GROUP_SCHED
9663 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
9664 #endif
9665 #ifdef CONFIG_SMP
9666 rq->sd = NULL;
9667 rq->rd = NULL;
9668 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
9669 rq->balance_callback = &balance_push_callback;
9670 rq->active_balance = 0;
9671 rq->next_balance = jiffies;
9672 rq->push_cpu = 0;
9673 rq->cpu = i;
9674 rq->online = 0;
9675 rq->idle_stamp = 0;
9676 rq->avg_idle = 2*sysctl_sched_migration_cost;
9677 rq->wake_stamp = jiffies;
9678 rq->wake_avg_idle = rq->avg_idle;
9679 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
9680
9681 INIT_LIST_HEAD(&rq->cfs_tasks);
9682
9683 rq_attach_root(rq, &def_root_domain);
9684 #ifdef CONFIG_NO_HZ_COMMON
9685 rq->last_blocked_load_update_tick = jiffies;
9686 atomic_set(&rq->nohz_flags, 0);
9687
9688 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
9689 #endif
9690 #ifdef CONFIG_HOTPLUG_CPU
9691 rcuwait_init(&rq->hotplug_wait);
9692 #endif
9693 #endif /* CONFIG_SMP */
9694 hrtick_rq_init(rq);
9695 atomic_set(&rq->nr_iowait, 0);
9696
9697 #ifdef CONFIG_SCHED_CORE
9698 rq->core = rq;
9699 rq->core_pick = NULL;
9700 rq->core_enabled = 0;
9701 rq->core_tree = RB_ROOT;
9702 rq->core_forceidle = false;
9703
9704 rq->core_cookie = 0UL;
9705 #endif
9706 }
9707
9708 set_load_weight(&init_task, false);
9709
9710 /*
9711 * The boot idle thread does lazy MMU switching as well:
9712 */
9713 mmgrab(&init_mm);
9714 enter_lazy_tlb(&init_mm, current);
9715
9716 /*
9717 * Make us the idle thread. Technically, schedule() should not be
9718 * called from this thread, however somewhere below it might be,
9719 * but because we are the idle thread, we just pick up running again
9720 * when this runqueue becomes "idle".
9721 */
9722 init_idle(current, smp_processor_id());
9723
9724 calc_load_update = jiffies + LOAD_FREQ;
9725
9726 #ifdef CONFIG_SMP
9727 idle_thread_set_boot_cpu();
9728 balance_push_set(smp_processor_id(), false);
9729 #endif
9730 init_sched_fair_class();
9731
9732 psi_init();
9733
9734 init_uclamp();
9735
9736 scheduler_running = 1;
9737 }
9738
9739 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
9740 static inline int preempt_count_equals(int preempt_offset)
9741 {
9742 int nested = preempt_count() + rcu_preempt_depth();
9743
9744 return (nested == preempt_offset);
9745 }
9746
9747 void __might_sleep(const char *file, int line, int preempt_offset)
9748 {
9749 unsigned int state = get_current_state();
9750 /*
9751 * Blocking primitives will set (and therefore destroy) current->state,
9752 * since we will exit with TASK_RUNNING, make sure we enter with it;
9753 * otherwise we will destroy state.
9754 */
9755 WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
9756 "do not call blocking ops when !TASK_RUNNING; "
9757 "state=%x set at [<%p>] %pS\n", state,
9758 (void *)current->task_state_change,
9759 (void *)current->task_state_change);
9760
9761 ___might_sleep(file, line, preempt_offset);
9762 }
9763 EXPORT_SYMBOL(__might_sleep);
9764
9765 void ___might_sleep(const char *file, int line, int preempt_offset)
9766 {
9767 /* Ratelimiting timestamp: */
9768 static unsigned long prev_jiffy;
9769
9770 unsigned long preempt_disable_ip;
9771
9772 /* WARN_ON_ONCE() by default, no rate limit required: */
9773 rcu_sleep_check();
9774
9775 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
9776 !is_idle_task(current) && !current->non_block_count) ||
9777 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
9778 oops_in_progress)
9779 return;
9780
9781 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
9782 return;
9783 prev_jiffy = jiffies;
9784
9785 /* Save this before calling printk(), since that will clobber it: */
9786 preempt_disable_ip = get_preempt_disable_ip(current);
9787
9788 printk(KERN_ERR
9789 "BUG: sleeping function called from invalid context at %s:%d\n",
9790 file, line);
9791 printk(KERN_ERR
9792 "in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
9793 in_atomic(), irqs_disabled(), current->non_block_count,
9794 current->pid, current->comm);
9795
9796 if (task_stack_end_corrupted(current))
9797 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
9798
9799 debug_show_held_locks(current);
9800 if (irqs_disabled())
9801 print_irqtrace_events(current);
9802 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
9803 && !preempt_count_equals(preempt_offset)) {
9804 pr_err("Preemption disabled at:");
9805 print_ip_sym(KERN_ERR, preempt_disable_ip);
9806 }
9807
9808 trace_android_rvh_schedule_bug(NULL);
9809
9810 dump_stack();
9811 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
9812 }
9813 EXPORT_SYMBOL(___might_sleep);
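/*
 * Illustrative sketch of the kind of hypothetical caller this machinery
 * is meant to catch (the function name is made up). Holding a spinlock
 * keeps preempt_count() non-zero, so the might_sleep() inside
 * mutex_lock() funnels into ___might_sleep() and prints the "sleeping
 * function called from invalid context" report.
 */
static __maybe_unused void might_sleep_bad_example(spinlock_t *lock,
						   struct mutex *m)
{
	spin_lock(lock);
	mutex_lock(m);		/* would splat here */
	mutex_unlock(m);
	spin_unlock(lock);
}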
9814
9815 void __cant_sleep(const char *file, int line, int preempt_offset)
9816 {
9817 static unsigned long prev_jiffy;
9818
9819 if (irqs_disabled())
9820 return;
9821
9822 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
9823 return;
9824
9825 if (preempt_count() > preempt_offset)
9826 return;
9827
9828 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
9829 return;
9830 prev_jiffy = jiffies;
9831
9832 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
9833 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
9834 in_atomic(), irqs_disabled(),
9835 current->pid, current->comm);
9836
9837 debug_show_held_locks(current);
9838 dump_stack();
9839 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
9840 }
9841 EXPORT_SYMBOL_GPL(__cant_sleep);
9842
9843 #ifdef CONFIG_SMP
9844 void __cant_migrate(const char *file, int line)
9845 {
9846 static unsigned long prev_jiffy;
9847
9848 if (irqs_disabled())
9849 return;
9850
9851 if (is_migration_disabled(current))
9852 return;
9853
9854 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
9855 return;
9856
9857 if (preempt_count() > 0)
9858 return;
9859
9860 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
9861 return;
9862 prev_jiffy = jiffies;
9863
9864 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
9865 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
9866 in_atomic(), irqs_disabled(), is_migration_disabled(current),
9867 current->pid, current->comm);
9868
9869 debug_show_held_locks(current);
9870 dump_stack();
9871 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
9872 }
9873 EXPORT_SYMBOL_GPL(__cant_migrate);
9874 #endif
9875 #endif
9876
9877 #ifdef CONFIG_MAGIC_SYSRQ
9878 void normalize_rt_tasks(void)
9879 {
9880 struct task_struct *g, *p;
9881 struct sched_attr attr = {
9882 .sched_policy = SCHED_NORMAL,
9883 };
9884
9885 read_lock(&tasklist_lock);
9886 for_each_process_thread(g, p) {
9887 /*
9888 * Only normalize user tasks:
9889 */
9890 if (p->flags & PF_KTHREAD)
9891 continue;
9892
9893 p->se.exec_start = 0;
9894 schedstat_set(p->se.statistics.wait_start, 0);
9895 schedstat_set(p->se.statistics.sleep_start, 0);
9896 schedstat_set(p->se.statistics.block_start, 0);
9897
9898 if (!dl_task(p) && !rt_task(p)) {
9899 /*
9900 * Renice negative nice level userspace
9901 * tasks back to 0:
9902 */
9903 if (task_nice(p) < 0)
9904 set_user_nice(p, 0);
9905 continue;
9906 }
9907
9908 __sched_setscheduler(p, &attr, false, false);
9909 }
9910 read_unlock(&tasklist_lock);
9911 }
9912
9913 #endif /* CONFIG_MAGIC_SYSRQ */
9914
9915 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
9916 /*
9917 * These functions are only useful for the IA64 MCA handling, or kdb.
9918 *
9919 * They can only be called when the whole system has been
9920 * stopped - every CPU needs to be quiescent, and no scheduling
9921 * activity can take place. Using them for anything else would
9922 * be a serious bug, and as a result, they aren't even visible
9923 * under any other configuration.
9924 */
9925
9926 /**
9927 * curr_task - return the current task for a given CPU.
9928 * @cpu: the processor in question.
9929 *
9930 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
9931 *
9932 * Return: The current task for @cpu.
9933 */
9934 struct task_struct *curr_task(int cpu)
9935 {
9936 return cpu_curr(cpu);
9937 }
9938
9939 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
9940
9941 #ifdef CONFIG_IA64
9942 /**
9943 * ia64_set_curr_task - set the current task for a given CPU.
9944 * @cpu: the processor in question.
9945 * @p: the task pointer to set.
9946 *
9947 * Description: This function must only be used when non-maskable interrupts
9948 * are serviced on a separate stack. It allows the architecture to switch the
9949 * notion of the current task on a CPU in a non-blocking manner. This function
9950 * must be called with all CPUs synchronized and interrupts disabled; the
9951 * caller must save the original value of the current task (see
9952 * curr_task() above) and restore that value before reenabling interrupts and
9953 * restarting the system.
9954 *
9955 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
9956 */
9957 void ia64_set_curr_task(int cpu, struct task_struct *p)
9958 {
9959 cpu_curr(cpu) = p;
9960 }
9961
9962 #endif
9963
9964 #ifdef CONFIG_CGROUP_SCHED
9965 /* task_group_lock serializes the addition/removal of task groups */
9966 static DEFINE_SPINLOCK(task_group_lock);
9967
9968 static inline void alloc_uclamp_sched_group(struct task_group *tg,
9969 struct task_group *parent)
9970 {
9971 #ifdef CONFIG_UCLAMP_TASK_GROUP
9972 enum uclamp_id clamp_id;
9973
9974 for_each_clamp_id(clamp_id) {
9975 uclamp_se_set(&tg->uclamp_req[clamp_id],
9976 uclamp_none(clamp_id), false);
9977 tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
9978 }
9979 #endif
9980 }
9981
9982 static void sched_free_group(struct task_group *tg)
9983 {
9984 free_fair_sched_group(tg);
9985 free_rt_sched_group(tg);
9986 autogroup_free(tg);
9987 kmem_cache_free(task_group_cache, tg);
9988 }
9989
9990 static void sched_free_group_rcu(struct rcu_head *rcu)
9991 {
9992 sched_free_group(container_of(rcu, struct task_group, rcu));
9993 }
9994
9995 static void sched_unregister_group(struct task_group *tg)
9996 {
9997 unregister_fair_sched_group(tg);
9998 unregister_rt_sched_group(tg);
9999 /*
10000 * We have to wait for yet another RCU grace period to expire, as
10001 * print_cfs_stats() might run concurrently.
10002 */
10003 call_rcu(&tg->rcu, sched_free_group_rcu);
10004 }
10005
10006 /* allocate runqueue etc for a new task group */
10007 struct task_group *sched_create_group(struct task_group *parent)
10008 {
10009 struct task_group *tg;
10010
10011 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
10012 if (!tg)
10013 return ERR_PTR(-ENOMEM);
10014
10015 if (!alloc_fair_sched_group(tg, parent))
10016 goto err;
10017
10018 if (!alloc_rt_sched_group(tg, parent))
10019 goto err;
10020
10021 alloc_uclamp_sched_group(tg, parent);
10022
10023 return tg;
10024
10025 err:
10026 sched_free_group(tg);
10027 return ERR_PTR(-ENOMEM);
10028 }
10029
10030 void sched_online_group(struct task_group *tg, struct task_group *parent)
10031 {
10032 unsigned long flags;
10033
10034 spin_lock_irqsave(&task_group_lock, flags);
10035 list_add_rcu(&tg->list, &task_groups);
10036
10037 /* Root should already exist: */
10038 WARN_ON(!parent);
10039
10040 tg->parent = parent;
10041 INIT_LIST_HEAD(&tg->children);
10042 list_add_rcu(&tg->siblings, &parent->children);
10043 spin_unlock_irqrestore(&task_group_lock, flags);
10044
10045 online_fair_sched_group(tg);
10046 }
10047
10048 /* rcu callback to free various structures associated with a task group */
10049 static void sched_unregister_group_rcu(struct rcu_head *rhp)
10050 {
10051 /* Now it should be safe to free those cfs_rqs: */
10052 sched_unregister_group(container_of(rhp, struct task_group, rcu));
10053 }
10054
10055 void sched_destroy_group(struct task_group *tg)
10056 {
10057 /* Wait for possible concurrent references to cfs_rqs to complete: */
10058 call_rcu(&tg->rcu, sched_unregister_group_rcu);
10059 }
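/*
 * Illustrative sketch of the lifetime of a group created through this
 * API, mirroring the in-tree users (the cgroup code below and autogroup).
 * The function is hypothetical, error handling is elided, and it is not
 * meant to compile in place as-is; the parent may be any live group.
 */
static __maybe_unused void task_group_lifetime_example(void)
{
	struct task_group *tg = sched_create_group(&root_task_group);

	if (IS_ERR(tg))
		return;
	sched_online_group(tg, &root_task_group);	/* make it visible */
	/* ... move tasks in and out, adjust shares ... */
	sched_release_group(tg);	/* unlink from the hierarchy */
	sched_destroy_group(tg);	/* freed after an RCU grace period */
}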
10060
10061 void sched_release_group(struct task_group *tg)
10062 {
10063 unsigned long flags;
10064
10065 /*
10066 * Unlink first, to prevent walk_tg_tree_from() from finding us (via
10067 * sched_cfs_period_timer()).
10068 *
10069 * For this to be effective, we have to wait for all pending users of
10070 * this task group to leave their RCU critical section to ensure no new
10071 * user will see our dying task group any more. Specifically ensure
10072 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
10073 *
10074 * We therefore defer calling unregister_fair_sched_group() to
10075 * sched_unregister_group(), which is guaranteed to get called only after the
10076 * current RCU grace period has expired.
10077 */
10078 spin_lock_irqsave(&task_group_lock, flags);
10079 list_del_rcu(&tg->list);
10080 list_del_rcu(&tg->siblings);
10081 spin_unlock_irqrestore(&task_group_lock, flags);
10082 }
10083
10084 static void sched_change_group(struct task_struct *tsk, int type)
10085 {
10086 struct task_group *tg;
10087
10088 /*
10089 * All callers are synchronized by task_rq_lock(); we do not use RCU
10090 * which would be pointless here. Thus, we pass "true" to task_css_check()
10091 * to prevent lockdep warnings.
10092 */
10093 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
10094 struct task_group, css);
10095 tg = autogroup_task_group(tsk, tg);
10096 tsk->sched_task_group = tg;
10097
10098 #ifdef CONFIG_FAIR_GROUP_SCHED
10099 if (tsk->sched_class->task_change_group)
10100 tsk->sched_class->task_change_group(tsk, type);
10101 else
10102 #endif
10103 set_task_rq(tsk, task_cpu(tsk));
10104 }
10105
10106 /*
10107 * Change task's runqueue when it moves between groups.
10108 *
10109 * The caller of this function should have put the task in its new group by
10110 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
10111 * its new group.
10112 */
10113 void sched_move_task(struct task_struct *tsk)
10114 {
10115 int queued, running, queue_flags =
10116 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
10117 struct rq_flags rf;
10118 struct rq *rq;
10119
10120 rq = task_rq_lock(tsk, &rf);
10121 update_rq_clock(rq);
10122
10123 running = task_current(rq, tsk);
10124 queued = task_on_rq_queued(tsk);
10125
10126 if (queued)
10127 dequeue_task(rq, tsk, queue_flags);
10128 if (running)
10129 put_prev_task(rq, tsk);
10130
10131 sched_change_group(tsk, TASK_MOVE_GROUP);
10132
10133 if (queued)
10134 enqueue_task(rq, tsk, queue_flags);
10135 if (running) {
10136 set_next_task(rq, tsk);
10137 /*
10138 * After changing group, the running task may have joined a
10139 * throttled one but it's still the running task. Trigger a
10140 * resched to make sure that task can still run.
10141 */
10142 resched_curr(rq);
10143 }
10144
10145 task_rq_unlock(rq, tsk, &rf);
10146 }
10147
10148 static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
10149 {
10150 return css ? container_of(css, struct task_group, css) : NULL;
10151 }
10152
10153 static struct cgroup_subsys_state *
10154 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
10155 {
10156 struct task_group *parent = css_tg(parent_css);
10157 struct task_group *tg;
10158
10159 if (!parent) {
10160 /* This is early initialization for the top cgroup */
10161 return &root_task_group.css;
10162 }
10163
10164 tg = sched_create_group(parent);
10165 if (IS_ERR(tg))
10166 return ERR_PTR(-ENOMEM);
10167
10168 return &tg->css;
10169 }
10170
10171 /* Expose task group only after completing cgroup initialization */
10172 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
10173 {
10174 struct task_group *tg = css_tg(css);
10175 struct task_group *parent = css_tg(css->parent);
10176
10177 if (parent)
10178 sched_online_group(tg, parent);
10179
10180 #ifdef CONFIG_UCLAMP_TASK_GROUP
10181 /* Propagate the effective uclamp value for the new group */
10182 mutex_lock(&uclamp_mutex);
10183 rcu_read_lock();
10184 cpu_util_update_eff(css);
10185 rcu_read_unlock();
10186 mutex_unlock(&uclamp_mutex);
10187 #endif
10188
10189 trace_android_rvh_cpu_cgroup_online(css);
10190 return 0;
10191 }
10192
10193 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
10194 {
10195 struct task_group *tg = css_tg(css);
10196
10197 sched_release_group(tg);
10198 }
10199
10200 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
10201 {
10202 struct task_group *tg = css_tg(css);
10203
10204 /*
10205 * Relies on the RCU grace period between css_released() and this.
10206 */
10207 sched_unregister_group(tg);
10208 }
10209
10210 /*
10211 * This is called before wake_up_new_task(), therefore we really only
10212 * have to set its group bits, all the other stuff does not apply.
10213 */
10214 static void cpu_cgroup_fork(struct task_struct *task)
10215 {
10216 struct rq_flags rf;
10217 struct rq *rq;
10218
10219 rq = task_rq_lock(task, &rf);
10220
10221 update_rq_clock(rq);
10222 sched_change_group(task, TASK_SET_GROUP);
10223
10224 task_rq_unlock(rq, task, &rf);
10225 }
10226
10227 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
10228 {
10229 struct task_struct *task;
10230 struct cgroup_subsys_state *css;
10231 int ret = 0;
10232
10233 cgroup_taskset_for_each(task, css, tset) {
10234 #ifdef CONFIG_RT_GROUP_SCHED
10235 if (!sched_rt_can_attach(css_tg(css), task))
10236 return -EINVAL;
10237 #endif
10238 /*
10239 * Serialize against wake_up_new_task() such that if it's
10240 * running, we're sure to observe its full state.
10241 */
10242 raw_spin_lock_irq(&task->pi_lock);
10243 /*
10244 * Avoid calling sched_move_task() before wake_up_new_task()
10245 * has happened. This would lead to problems with PELT, due to
10246 * move wanting to detach+attach while we're not attached yet.
10247 */
10248 if (READ_ONCE(task->__state) == TASK_NEW)
10249 ret = -EINVAL;
10250 raw_spin_unlock_irq(&task->pi_lock);
10251
10252 if (ret)
10253 break;
10254 }
10255
10256 trace_android_rvh_cpu_cgroup_can_attach(tset, &ret);
10257
10258 return ret;
10259 }
10260
10261 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
10262 {
10263 struct task_struct *task;
10264 struct cgroup_subsys_state *css;
10265
10266 cgroup_taskset_for_each(task, css, tset)
10267 sched_move_task(task);
10268
10269 trace_android_rvh_cpu_cgroup_attach(tset);
10270 }
10271
10272 #ifdef CONFIG_UCLAMP_TASK_GROUP
10273 static void cpu_util_update_eff(struct cgroup_subsys_state *css)
10274 {
10275 struct cgroup_subsys_state *top_css = css;
10276 struct uclamp_se *uc_parent = NULL;
10277 struct uclamp_se *uc_se = NULL;
10278 unsigned int eff[UCLAMP_CNT];
10279 enum uclamp_id clamp_id;
10280 unsigned int clamps;
10281
10282 lockdep_assert_held(&uclamp_mutex);
10283 SCHED_WARN_ON(!rcu_read_lock_held());
10284
10285 css_for_each_descendant_pre(css, top_css) {
10286 uc_parent = css_tg(css)->parent
10287 ? css_tg(css)->parent->uclamp : NULL;
10288
10289 for_each_clamp_id(clamp_id) {
10290 /* Assume effective clamps match requested clamps */
10291 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
10292 /* Cap effective clamps with parent's effective clamps */
10293 if (uc_parent &&
10294 eff[clamp_id] > uc_parent[clamp_id].value) {
10295 eff[clamp_id] = uc_parent[clamp_id].value;
10296 }
10297 }
10298 /* Ensure protection is always capped by limit */
10299 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
10300
10301 /* Propagate most restrictive effective clamps */
10302 clamps = 0x0;
10303 uc_se = css_tg(css)->uclamp;
10304 for_each_clamp_id(clamp_id) {
10305 if (eff[clamp_id] == uc_se[clamp_id].value)
10306 continue;
10307 uc_se[clamp_id].value = eff[clamp_id];
10308 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
10309 clamps |= (0x1 << clamp_id);
10310 }
10311 if (!clamps) {
10312 css = css_rightmost_descendant(css);
10313 continue;
10314 }
10315
10316 /* Immediately update descendants' RUNNABLE tasks */
10317 uclamp_update_active_tasks(css);
10318 }
10319 }
10320
10321 /*
10322 * Build an integer 10^N for a given exponent N by casting the "1eN" C
10323 * literal to an integer. Since there is no way to convert a macro
10324 * argument (N) into a character constant, use two levels of macros.
10325 */
10326 #define _POW10(exp) ((unsigned int)1e##exp)
10327 #define POW10(exp) _POW10(exp)
10328
10329 struct uclamp_request {
10330 #define UCLAMP_PERCENT_SHIFT 2
10331 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT))
10332 s64 percent;
10333 u64 util;
10334 int ret;
10335 };
10336
10337 static inline struct uclamp_request
10338 capacity_from_percent(char *buf)
10339 {
10340 struct uclamp_request req = {
10341 .percent = UCLAMP_PERCENT_SCALE,
10342 .util = SCHED_CAPACITY_SCALE,
10343 .ret = 0,
10344 };
10345
10346 buf = strim(buf);
10347 if (strcmp(buf, "max")) {
10348 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
10349 &req.percent);
10350 if (req.ret)
10351 return req;
10352 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
10353 req.ret = -ERANGE;
10354 return req;
10355 }
10356
10357 req.util = req.percent << SCHED_CAPACITY_SHIFT;
10358 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
10359 }
10360
10361 return req;
10362 }
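/*
 * Illustrative worked example: writing "50" is parsed with
 * UCLAMP_PERCENT_SHIFT == 2 fractional digits, so req.percent becomes
 * 5000 out of UCLAMP_PERCENT_SCALE (10000), and
 *
 *	req.util = DIV_ROUND_CLOSEST_ULL(5000 << SCHED_CAPACITY_SHIFT, 10000)
 *	         = 512,
 *
 * i.e. half of SCHED_CAPACITY_SCALE (1024), as expected for 50%.
 */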
10363
10364 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
10365 size_t nbytes, loff_t off,
10366 enum uclamp_id clamp_id)
10367 {
10368 struct uclamp_request req;
10369 struct task_group *tg;
10370
10371 req = capacity_from_percent(buf);
10372 if (req.ret)
10373 return req.ret;
10374
10375 static_branch_enable(&sched_uclamp_used);
10376
10377 mutex_lock(&uclamp_mutex);
10378 rcu_read_lock();
10379
10380 tg = css_tg(of_css(of));
10381 if (tg->uclamp_req[clamp_id].value != req.util)
10382 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
10383
10384 /*
10385 * Because the conversion rounding is not recoverable, we keep track of
10386 * the exact requested value.
10387 */
10388 tg->uclamp_pct[clamp_id] = req.percent;
10389
10390 /* Update effective clamps to track the most restrictive value */
10391 cpu_util_update_eff(of_css(of));
10392
10393 rcu_read_unlock();
10394 mutex_unlock(&uclamp_mutex);
10395
10396 return nbytes;
10397 }
10398
10399 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
10400 char *buf, size_t nbytes,
10401 loff_t off)
10402 {
10403 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
10404 }
10405
10406 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
10407 char *buf, size_t nbytes,
10408 loff_t off)
10409 {
10410 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
10411 }
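/*
 * Illustrative sketch (userspace, hypothetical paths): exercising the
 * write handlers above through the cgroup filesystem, assuming a v2
 * hierarchy mounted at /sys/fs/cgroup with a group named "app". Writing
 * "25.50" requests 25.5%, which capacity_from_percent() maps to a
 * utilization clamp of about 261:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/fs/cgroup/app/cpu.uclamp.min", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "25.50", 5);
 *		close(fd);
 *		return 0;
 *	}
 */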
10412
10413 static inline void cpu_uclamp_print(struct seq_file *sf,
10414 enum uclamp_id clamp_id)
10415 {
10416 struct task_group *tg;
10417 u64 util_clamp;
10418 u64 percent;
10419 u32 rem;
10420
10421 rcu_read_lock();
10422 tg = css_tg(seq_css(sf));
10423 util_clamp = tg->uclamp_req[clamp_id].value;
10424 rcu_read_unlock();
10425
10426 if (util_clamp == SCHED_CAPACITY_SCALE) {
10427 seq_puts(sf, "max\n");
10428 return;
10429 }
10430
10431 percent = tg->uclamp_pct[clamp_id];
10432 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
10433 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
10434 }
10435
10436 static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
10437 {
10438 cpu_uclamp_print(sf, UCLAMP_MIN);
10439 return 0;
10440 }
10441
10442 static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
10443 {
10444 cpu_uclamp_print(sf, UCLAMP_MAX);
10445 return 0;
10446 }
10447
10448 static int cpu_uclamp_ls_write_u64(struct cgroup_subsys_state *css,
10449 struct cftype *cftype, u64 ls)
10450 {
10451 struct task_group *tg;
10452
10453 if (ls > 1)
10454 return -EINVAL;
10455 tg = css_tg(css);
10456 tg->latency_sensitive = (unsigned int) ls;
10457
10458 return 0;
10459 }
10460
10461 static u64 cpu_uclamp_ls_read_u64(struct cgroup_subsys_state *css,
10462 struct cftype *cft)
10463 {
10464 struct task_group *tg = css_tg(css);
10465
10466 return (u64) tg->latency_sensitive;
10467 }
10468 #endif /* CONFIG_UCLAMP_TASK_GROUP */
10469
10470 #ifdef CONFIG_FAIR_GROUP_SCHED
10471 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
10472 struct cftype *cftype, u64 shareval)
10473 {
10474 if (shareval > scale_load_down(ULONG_MAX))
10475 shareval = MAX_SHARES;
10476 return sched_group_set_shares(css_tg(css), scale_load(shareval));
10477 }
10478
10479 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
10480 struct cftype *cft)
10481 {
10482 struct task_group *tg = css_tg(css);
10483
10484 return (u64) scale_load_down(tg->shares);
10485 }
10486
10487 #ifdef CONFIG_CFS_BANDWIDTH
10488 static DEFINE_MUTEX(cfs_constraints_mutex);
10489
10490 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
10491 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
10492 /* More than 203 days if BW_SHIFT equals 20. */
10493 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
10494
10495 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
10496
10497 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
10498 u64 burst)
10499 {
10500 int i, ret = 0, runtime_enabled, runtime_was_enabled;
10501 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
10502
10503 if (tg == &root_task_group)
10504 return -EINVAL;
10505
10506 /*
10507 * Ensure we have at least some amount of bandwidth every period. This is
10508 * to prevent reaching a state of large arrears when throttled via
10509 * entity_tick() resulting in prolonged exit starvation.
10510 */
10511 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
10512 return -EINVAL;
10513
10514 /*
10515 * Likewise, bound things on the other side by preventing insane quota
10516 * periods. This also allows us to normalize in computing quota
10517 * feasibility.
10518 */
10519 if (period > max_cfs_quota_period)
10520 return -EINVAL;
10521
10522 /*
10523 * Bound quota to defend quota against overflow during bandwidth shift.
10524 */
10525 if (quota != RUNTIME_INF && quota > max_cfs_runtime)
10526 return -EINVAL;
10527
10528 if (quota != RUNTIME_INF && (burst > quota ||
10529 burst + quota > max_cfs_runtime))
10530 return -EINVAL;
10531
10532 /*
10533 * Prevent race between setting of cfs_rq->runtime_enabled and
10534 * unthrottle_offline_cfs_rqs().
10535 */
10536 cpus_read_lock();
10537 mutex_lock(&cfs_constraints_mutex);
10538 ret = __cfs_schedulable(tg, period, quota);
10539 if (ret)
10540 goto out_unlock;
10541
10542 runtime_enabled = quota != RUNTIME_INF;
10543 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
10544 /*
10545 * If we need to toggle cfs_bandwidth_used, off->on must occur
10546 * before making related changes, and on->off must occur afterwards
10547 */
10548 if (runtime_enabled && !runtime_was_enabled)
10549 cfs_bandwidth_usage_inc();
10550 raw_spin_lock_irq(&cfs_b->lock);
10551 cfs_b->period = ns_to_ktime(period);
10552 cfs_b->quota = quota;
10553 cfs_b->burst = burst;
10554
10555 __refill_cfs_bandwidth_runtime(cfs_b);
10556
10557 /* Restart the period timer (if active) to handle new period expiry: */
10558 if (runtime_enabled)
10559 start_cfs_bandwidth(cfs_b);
10560
10561 raw_spin_unlock_irq(&cfs_b->lock);
10562
10563 for_each_online_cpu(i) {
10564 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
10565 struct rq *rq = cfs_rq->rq;
10566 struct rq_flags rf;
10567
10568 rq_lock_irq(rq, &rf);
10569 cfs_rq->runtime_enabled = runtime_enabled;
10570 cfs_rq->runtime_remaining = 0;
10571
10572 if (cfs_rq->throttled)
10573 unthrottle_cfs_rq(cfs_rq);
10574 rq_unlock_irq(rq, &rf);
10575 }
10576 if (runtime_was_enabled && !runtime_enabled)
10577 cfs_bandwidth_usage_dec();
10578 out_unlock:
10579 mutex_unlock(&cfs_constraints_mutex);
10580 cpus_read_unlock();
10581
10582 return ret;
10583 }
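/*
 * Illustrative example (hypothetical values): a request of period = 100ms
 * and quota = 50ms arrives here as 100000000ns / 50000000ns. Both clear
 * min_cfs_quota_period (1ms), the period is under max_cfs_quota_period
 * (1s), and the quota is far below max_cfs_runtime, so the request is
 * accepted and the group may consume at most 50ms of CPU time, summed
 * across CPUs, per 100ms period.
 */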
10584
10585 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
10586 {
10587 u64 quota, period, burst;
10588
10589 period = ktime_to_ns(tg->cfs_bandwidth.period);
10590 burst = tg->cfs_bandwidth.burst;
10591 if (cfs_quota_us < 0)
10592 quota = RUNTIME_INF;
10593 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
10594 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
10595 else
10596 return -EINVAL;
10597
10598 return tg_set_cfs_bandwidth(tg, period, quota, burst);
10599 }
10600
10601 static long tg_get_cfs_quota(struct task_group *tg)
10602 {
10603 u64 quota_us;
10604
10605 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
10606 return -1;
10607
10608 quota_us = tg->cfs_bandwidth.quota;
10609 do_div(quota_us, NSEC_PER_USEC);
10610
10611 return quota_us;
10612 }
10613
10614 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
10615 {
10616 u64 quota, period, burst;
10617
10618 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
10619 return -EINVAL;
10620
10621 period = (u64)cfs_period_us * NSEC_PER_USEC;
10622 quota = tg->cfs_bandwidth.quota;
10623 burst = tg->cfs_bandwidth.burst;
10624
10625 return tg_set_cfs_bandwidth(tg, period, quota, burst);
10626 }
10627
10628 static long tg_get_cfs_period(struct task_group *tg)
10629 {
10630 u64 cfs_period_us;
10631
10632 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
10633 do_div(cfs_period_us, NSEC_PER_USEC);
10634
10635 return cfs_period_us;
10636 }
10637
10638 static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
10639 {
10640 u64 quota, period, burst;
10641
10642 if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
10643 return -EINVAL;
10644
10645 burst = (u64)cfs_burst_us * NSEC_PER_USEC;
10646 period = ktime_to_ns(tg->cfs_bandwidth.period);
10647 quota = tg->cfs_bandwidth.quota;
10648
10649 return tg_set_cfs_bandwidth(tg, period, quota, burst);
10650 }
10651
10652 static long tg_get_cfs_burst(struct task_group *tg)
10653 {
10654 u64 burst_us;
10655
10656 burst_us = tg->cfs_bandwidth.burst;
10657 do_div(burst_us, NSEC_PER_USEC);
10658
10659 return burst_us;
10660 }
10661
10662 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
10663 struct cftype *cft)
10664 {
10665 return tg_get_cfs_quota(css_tg(css));
10666 }
10667
10668 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
10669 struct cftype *cftype, s64 cfs_quota_us)
10670 {
10671 return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
10672 }
10673
10674 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
10675 struct cftype *cft)
10676 {
10677 return tg_get_cfs_period(css_tg(css));
10678 }
10679
10680 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
10681 struct cftype *cftype, u64 cfs_period_us)
10682 {
10683 return tg_set_cfs_period(css_tg(css), cfs_period_us);
10684 }
10685
10686 static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
10687 struct cftype *cft)
10688 {
10689 return tg_get_cfs_burst(css_tg(css));
10690 }
10691
10692 static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
10693 struct cftype *cftype, u64 cfs_burst_us)
10694 {
10695 return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
10696 }
10697
10698 struct cfs_schedulable_data {
10699 struct task_group *tg;
10700 u64 period, quota;
10701 };
10702
10703 /*
10704 * normalize group quota/period to be quota/max_period
10705 * note: units are usecs
10706 */
10707 static u64 normalize_cfs_quota(struct task_group *tg,
10708 struct cfs_schedulable_data *d)
10709 {
10710 u64 quota, period;
10711
10712 if (tg == d->tg) {
10713 period = d->period;
10714 quota = d->quota;
10715 } else {
10716 period = tg_get_cfs_period(tg);
10717 quota = tg_get_cfs_quota(tg);
10718 }
10719
10720 /* note: these should typically be equivalent */
10721 if (quota == RUNTIME_INF || quota == -1)
10722 return RUNTIME_INF;
10723
10724 return to_ratio(period, quota);
10725 }
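/*
 * Illustrative worked example: with period = 100000us and quota = 50000us,
 * to_ratio() yields (50000 << BW_SHIFT) / 100000 == 524288, i.e. 0.5 in
 * 20-bit fixed point. tg_cfs_schedulable_down() below compares these
 * normalized ratios down the hierarchy.
 */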
10726
10727 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
10728 {
10729 struct cfs_schedulable_data *d = data;
10730 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
10731 s64 quota = 0, parent_quota = -1;
10732
10733 if (!tg->parent) {
10734 quota = RUNTIME_INF;
10735 } else {
10736 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
10737
10738 quota = normalize_cfs_quota(tg, d);
10739 parent_quota = parent_b->hierarchical_quota;
10740
10741 /*
10742 * Ensure max(child_quota) <= parent_quota. On cgroup2,
10743 * always take the min. On cgroup1, only inherit when no
10744 * limit is set:
10745 */
10746 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
10747 quota = min(quota, parent_quota);
10748 } else {
10749 if (quota == RUNTIME_INF)
10750 quota = parent_quota;
10751 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
10752 return -EINVAL;
10753 }
10754 }
10755 cfs_b->hierarchical_quota = quota;
10756
10757 return 0;
10758 }
10759
10760 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
10761 {
10762 int ret;
10763 struct cfs_schedulable_data data = {
10764 .tg = tg,
10765 .period = period,
10766 .quota = quota,
10767 };
10768
10769 if (quota != RUNTIME_INF) {
10770 do_div(data.period, NSEC_PER_USEC);
10771 do_div(data.quota, NSEC_PER_USEC);
10772 }
10773
10774 rcu_read_lock();
10775 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
10776 rcu_read_unlock();
10777
10778 return ret;
10779 }
10780
10781 static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
10782 {
10783 struct task_group *tg = css_tg(seq_css(sf));
10784 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
10785
10786 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
10787 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
10788 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
10789
10790 if (schedstat_enabled() && tg != &root_task_group) {
10791 u64 ws = 0;
10792 int i;
10793
10794 for_each_possible_cpu(i)
10795 ws += schedstat_val(tg->se[i]->statistics.wait_sum);
10796
10797 seq_printf(sf, "wait_sum %llu\n", ws);
10798 }
10799
10800 return 0;
10801 }
10802 #endif /* CONFIG_CFS_BANDWIDTH */
10803 #endif /* CONFIG_FAIR_GROUP_SCHED */
10804
10805 #ifdef CONFIG_RT_GROUP_SCHED
10806 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
10807 struct cftype *cft, s64 val)
10808 {
10809 return sched_group_set_rt_runtime(css_tg(css), val);
10810 }
10811
10812 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
10813 struct cftype *cft)
10814 {
10815 return sched_group_rt_runtime(css_tg(css));
10816 }
10817
10818 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
10819 struct cftype *cftype, u64 rt_period_us)
10820 {
10821 return sched_group_set_rt_period(css_tg(css), rt_period_us);
10822 }
10823
10824 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
10825 struct cftype *cft)
10826 {
10827 return sched_group_rt_period(css_tg(css));
10828 }
10829 #endif /* CONFIG_RT_GROUP_SCHED */
10830
10831 #ifdef CONFIG_FAIR_GROUP_SCHED
10832 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
10833 struct cftype *cft)
10834 {
10835 return css_tg(css)->idle;
10836 }
10837
10838 static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
10839 struct cftype *cft, s64 idle)
10840 {
10841 return sched_group_set_idle(css_tg(css), idle);
10842 }
10843 #endif
10844
10845 static struct cftype cpu_legacy_files[] = {
10846 #ifdef CONFIG_FAIR_GROUP_SCHED
10847 {
10848 .name = "shares",
10849 .read_u64 = cpu_shares_read_u64,
10850 .write_u64 = cpu_shares_write_u64,
10851 },
10852 {
10853 .name = "idle",
10854 .read_s64 = cpu_idle_read_s64,
10855 .write_s64 = cpu_idle_write_s64,
10856 },
10857 #endif
10858 #ifdef CONFIG_CFS_BANDWIDTH
10859 {
10860 .name = "cfs_quota_us",
10861 .read_s64 = cpu_cfs_quota_read_s64,
10862 .write_s64 = cpu_cfs_quota_write_s64,
10863 },
10864 {
10865 .name = "cfs_period_us",
10866 .read_u64 = cpu_cfs_period_read_u64,
10867 .write_u64 = cpu_cfs_period_write_u64,
10868 },
10869 {
10870 .name = "cfs_burst_us",
10871 .read_u64 = cpu_cfs_burst_read_u64,
10872 .write_u64 = cpu_cfs_burst_write_u64,
10873 },
10874 {
10875 .name = "stat",
10876 .seq_show = cpu_cfs_stat_show,
10877 },
10878 #endif
10879 #ifdef CONFIG_RT_GROUP_SCHED
10880 {
10881 .name = "rt_runtime_us",
10882 .read_s64 = cpu_rt_runtime_read,
10883 .write_s64 = cpu_rt_runtime_write,
10884 },
10885 {
10886 .name = "rt_period_us",
10887 .read_u64 = cpu_rt_period_read_uint,
10888 .write_u64 = cpu_rt_period_write_uint,
10889 },
10890 #endif
10891 #ifdef CONFIG_UCLAMP_TASK_GROUP
10892 {
10893 .name = "uclamp.min",
10894 .flags = CFTYPE_NOT_ON_ROOT,
10895 .seq_show = cpu_uclamp_min_show,
10896 .write = cpu_uclamp_min_write,
10897 },
10898 {
10899 .name = "uclamp.max",
10900 .flags = CFTYPE_NOT_ON_ROOT,
10901 .seq_show = cpu_uclamp_max_show,
10902 .write = cpu_uclamp_max_write,
10903 },
10904 {
10905 .name = "uclamp.latency_sensitive",
10906 .flags = CFTYPE_NOT_ON_ROOT,
10907 .read_u64 = cpu_uclamp_ls_read_u64,
10908 .write_u64 = cpu_uclamp_ls_write_u64,
10909 },
10910 #endif
10911 { } /* Terminate */
10912 };
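/*
 * Illustrative note: these cftype entries back the cgroup v1 ("legacy")
 * interface (cpu.shares, cpu.cfs_quota_us, ...). The unified (v2)
 * hierarchy instead exposes cpu.weight and cpu.max, built on the
 * cpu_weight_*() and cpu_period_quota_*() helpers below.
 */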
10913
10914 static int cpu_extra_stat_show(struct seq_file *sf,
10915 struct cgroup_subsys_state *css)
10916 {
10917 #ifdef CONFIG_CFS_BANDWIDTH
10918 {
10919 struct task_group *tg = css_tg(css);
10920 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
10921 u64 throttled_usec;
10922
10923 throttled_usec = cfs_b->throttled_time;
10924 do_div(throttled_usec, NSEC_PER_USEC);
10925
10926 seq_printf(sf, "nr_periods %d\n"
10927 "nr_throttled %d\n"
10928 "throttled_usec %llu\n",
10929 cfs_b->nr_periods, cfs_b->nr_throttled,
10930 throttled_usec);
10931 }
10932 #endif
10933 return 0;
10934 }
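
/*
 * The fields above are appended to the cgroup v2 "cpu.stat" file via the
 * css_extra_stat_show() hook. A sample of the resulting lines (values are
 * illustrative only):
 *
 *	nr_periods 1000
 *	nr_throttled 12
 *	throttled_usec 48731
 */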

#ifdef CONFIG_FAIR_GROUP_SCHED
static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct task_group *tg = css_tg(css);
	u64 weight = scale_load_down(tg->shares);

	return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024);
}

static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 weight)
{
	/*
	 * cgroup weight knobs should use the common MIN, DFL and MAX
	 * values, which are 1, 100 and 10000 respectively. While it loses
	 * a bit of range on both ends, it maps pretty well onto the shares
	 * value used by the scheduler, and the round-trip conversions
	 * preserve the original value over the entire range.
	 */
	if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
		return -ERANGE;

	weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);

	return sched_group_set_shares(css_tg(css), scale_load(weight));
}
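
/*
 * A minimal userspace sketch (guarded out of the build) checking the
 * round-trip claim in the comment above. div_round_closest() is a local
 * stand-in for the kernel's DIV_ROUND_CLOSEST_ULL(); the sketch ignores
 * the extra scale_load()/scale_load_down() shift, so it only mirrors the
 * visible arithmetic, not the kernel's internal fixed-point resolution.
 */
#if 0
#include <stdio.h>

static unsigned long long div_round_closest(unsigned long long x,
					    unsigned long long d)
{
	return (x + d / 2) / d;
}

int main(void)
{
	/* weight -> shares -> weight over the full [1, 10000] range */
	for (unsigned long long w = 1; w <= 10000; w++) {
		unsigned long long shares = div_round_closest(w * 1024, 100);
		unsigned long long back = div_round_closest(shares * 100, 1024);

		if (back != w)
			printf("weight %llu round-trips to %llu\n", w, back);
	}
	return 0;	/* prints nothing: the mapping is lossless */
}
#endif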

static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
				    struct cftype *cft)
{
	unsigned long weight = scale_load_down(css_tg(css)->shares);
	int last_delta = INT_MAX;
	int prio, delta;

	/* find the closest nice value to the current weight */
	for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
		delta = abs(sched_prio_to_weight[prio] - weight);
		if (delta >= last_delta)
			break;
		last_delta = delta;
	}

	return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
}
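
/*
 * Worked example: a group at the default shares (weight 1024) matches
 * sched_prio_to_weight[20] exactly and reads back as nice 0. A group set
 * to weight 800 falls between 820 (nice 1) and 655 (nice 2); the loop
 * breaks once the delta starts growing again, so the closer entry,
 * nice 1, wins.
 */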

static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
				     struct cftype *cft, s64 nice)
{
	unsigned long weight;
	int idx;

	if (nice < MIN_NICE || nice > MAX_NICE)
		return -ERANGE;

	idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
	idx = array_index_nospec(idx, 40);
	weight = sched_prio_to_weight[idx];

	return sched_group_set_shares(css_tg(css), scale_load(weight));
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
						  long period, long quota)
{
	if (quota < 0)
		seq_puts(sf, "max");
	else
		seq_printf(sf, "%ld", quota);

	seq_printf(sf, " %ld\n", period);
}

/* caller should put the current value in *@periodp before calling */
static int __maybe_unused cpu_period_quota_parse(char *buf,
						 u64 *periodp, u64 *quotap)
{
	char tok[21];	/* U64_MAX */

	if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
		return -EINVAL;

	*periodp *= NSEC_PER_USEC;

	if (sscanf(tok, "%llu", quotap))
		*quotap *= NSEC_PER_USEC;
	else if (!strcmp(tok, "max"))
		*quotap = RUNTIME_INF;
	else
		return -EINVAL;

	return 0;
}
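
/*
 * Accepted input, by way of example: "max 100000" yields an unlimited
 * quota (RUNTIME_INF) with a 100ms period; "50000 100000" yields a 50ms
 * quota per 100ms period; a lone "50000" sets only the quota, which is
 * why the caller must seed *@periodp with the current period first.
 */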

#ifdef CONFIG_CFS_BANDWIDTH
static int cpu_max_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));

	cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
	return 0;
}

static ssize_t cpu_max_write(struct kernfs_open_file *of,
			     char *buf, size_t nbytes, loff_t off)
{
	struct task_group *tg = css_tg(of_css(of));
	u64 period = tg_get_cfs_period(tg);
	u64 burst = tg_get_cfs_burst(tg);
	u64 quota;
	int ret;

	ret = cpu_period_quota_parse(buf, &period, &quota);
	if (!ret)
		ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
	return ret ?: nbytes;
}
#endif /* CONFIG_CFS_BANDWIDTH */
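
/*
 * Example usage from the cgroup v2 interface (the mount point and group
 * name below are assumptions; they vary by system):
 *
 *	# cap the group at 50ms of CPU time per 100ms period
 *	echo "50000 100000" > /sys/fs/cgroup/mygroup/cpu.max
 *
 *	# lift the cap again, keeping the current period
 *	echo "max" > /sys/fs/cgroup/mygroup/cpu.max
 */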

static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = cpu_weight_read_u64,
		.write_u64 = cpu_weight_write_u64,
	},
	{
		.name = "weight.nice",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_s64 = cpu_weight_nice_read_s64,
		.write_s64 = cpu_weight_nice_write_s64,
	},
	{
		.name = "idle",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_s64 = cpu_idle_read_s64,
		.write_s64 = cpu_idle_write_s64,
	},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_max_show,
		.write = cpu_max_write,
	},
	{
		.name = "max.burst",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = cpu_cfs_burst_read_u64,
		.write_u64 = cpu_cfs_burst_write_u64,
	},
#endif
#ifdef CONFIG_UCLAMP_TASK_GROUP
	{
		.name = "uclamp.min",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_min_show,
		.write = cpu_uclamp_min_write,
	},
	{
		.name = "uclamp.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_max_show,
		.write = cpu_uclamp_max_write,
	},
	{
		.name = "uclamp.latency_sensitive",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = cpu_uclamp_ls_read_u64,
		.write_u64 = cpu_uclamp_ls_write_u64,
	},
#endif
	{ }	/* terminate */
};

struct cgroup_subsys cpu_cgrp_subsys = {
	.css_alloc	= cpu_cgroup_css_alloc,
	.css_online	= cpu_cgroup_css_online,
	.css_released	= cpu_cgroup_css_released,
	.css_free	= cpu_cgroup_css_free,
	.css_extra_stat_show = cpu_extra_stat_show,
	.fork		= cpu_cgroup_fork,
	.can_attach	= cpu_cgroup_can_attach,
	.attach		= cpu_cgroup_attach,
	.legacy_cftypes	= cpu_legacy_files,
	.dfl_cftypes	= cpu_files,
	.early_init	= true,
	.threaded	= true,
};

#endif /* CONFIG_CGROUP_SCHED */

void dump_cpu_task(int cpu)
{
	pr_info("Task dump for CPU %d:\n", cpu);
	sched_show_task(cpu_curr(cpu));
}

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
const int sched_prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
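
/*
 * Worked example of the "10% effect": two CPU-bound tasks at nice 0 and
 * nice 1 have weights 1024 and 820, so they get 1024/1844 ~= 55.5% and
 * 820/1844 ~= 44.5% of the CPU. That is roughly +10% for one and -10%
 * for the other, i.e. the ~25% relative distance produced by the 1.25
 * multiplier (1024/820 ~= 1.25).
 */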

/*
 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetic by turning divisions
 * into multiplications:
 */
const u32 sched_prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
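
/*
 * A minimal userspace sketch (guarded out of the build) of the
 * divide-by-multiply trick: with inv_weight = 2^32 / weight precomputed,
 * x / weight becomes (x * inv_weight) >> 32, truncating, and is valid
 * while x * inv_weight fits in 64 bits. div_by_weight() is a local
 * illustration, not the kernel's __calc_delta().
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t div_by_weight(uint64_t x, uint32_t inv_weight)
{
	return (x * (uint64_t)inv_weight) >> 32;
}

int main(void)
{
	/* nice  0: weight 1024, inverse 4194304 (2^32 / 1024, exact) */
	printf("%llu\n", (unsigned long long)div_by_weight(10240, 4194304));
	/* prints 10, i.e. 10240 / 1024 */

	/* nice  5: weight 335, inverse 12820798 (~2^32 / 335) */
	printf("%llu\n", (unsigned long long)div_by_weight(33500, 12820798));
	/* prints 100, i.e. 33500 / 335 */
	return 0;
}
#endif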

void call_trace_sched_update_nr_running(struct rq *rq, int count)
{
	trace_sched_update_nr_running_tp(rq, count);
}
