• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  kernel/sched/core.c
4  *
5  *  Core kernel scheduler code and related syscalls
6  *
7  *  Copyright (C) 1991-2002  Linus Torvalds
8  */
9 #define CREATE_TRACE_POINTS
10 #include <trace/events/sched.h>
11 #undef CREATE_TRACE_POINTS
12 
13 #include "sched.h"
14 
15 #include <linux/nospec.h>
16 
17 #include <linux/kcov.h>
18 #include <linux/scs.h>
19 #include <linux/irq.h>
20 #include <linux/delay.h>
21 
22 #include <asm/switch_to.h>
23 #include <asm/tlb.h>
24 
25 #include "../workqueue_internal.h"
26 #include "../../fs/io-wq.h"
27 #include "../smpboot.h"
28 
29 #include "pelt.h"
30 #include "smp.h"
31 #include "walt.h"
32 #include "rtg/rtg.h"
33 
34 /*
35  * Export tracepoints that act as a bare tracehook (ie: have no trace event
36  * associated with them) to allow external modules to probe them.
37  */
38 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
39 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
40 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
41 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
42 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
43 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
44 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
45 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
46 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
47 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
48 
49 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
50 
51 #ifdef CONFIG_SCHED_DEBUG
52 /*
53  * Debugging: various feature bits
54  *
55  * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
56  * sysctl_sched_features, defined in sched.h, to allow constants propagation
57  * at compile time and compiler optimization based on features default.
58  */
59 #define SCHED_FEAT(name, enabled)	\
60 	(1UL << __SCHED_FEAT_##name) * enabled |
61 const_debug unsigned int sysctl_sched_features =
62 #include "features.h"
63 	0;
64 #undef SCHED_FEAT
65 #endif
66 
67 /*
68  * Number of tasks to iterate in a single balance run.
69  * Limited because this is done with IRQs disabled.
70  */
71 const_debug unsigned int sysctl_sched_nr_migrate = 32;
72 
73 /*
74  * period over which we measure -rt task CPU usage in us.
75  * default: 1s
76  */
77 unsigned int sysctl_sched_rt_period = 1000000;
78 
79 __read_mostly int scheduler_running;
80 
81 /*
82  * part of the period that we allow rt tasks to run in us.
83  * default: 0.95s
84  */
85 int sysctl_sched_rt_runtime = 950000;
86 
87 
88 /*
89  * Serialization rules:
90  *
91  * Lock order:
92  *
93  *   p->pi_lock
94  *     rq->lock
95  *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
96  *
97  *  rq1->lock
98  *    rq2->lock  where: rq1 < rq2
99  *
100  * Regular state:
101  *
102  * Normal scheduling state is serialized by rq->lock. __schedule() takes the
103  * local CPU's rq->lock, it optionally removes the task from the runqueue and
104  * always looks at the local rq data structures to find the most elegible task
105  * to run next.
106  *
107  * Task enqueue is also under rq->lock, possibly taken from another CPU.
108  * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
109  * the local CPU to avoid bouncing the runqueue state around [ see
110  * ttwu_queue_wakelist() ]
111  *
112  * Task wakeup, specifically wakeups that involve migration, are horribly
113  * complicated to avoid having to take two rq->locks.
114  *
115  * Special state:
116  *
117  * System-calls and anything external will use task_rq_lock() which acquires
118  * both p->pi_lock and rq->lock. As a consequence the state they change is
119  * stable while holding either lock:
120  *
121  *  - sched_setaffinity()/
122  *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
123  *  - set_user_nice():		p->se.load, p->*prio
124  *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
125  *				p->se.load, p->rt_priority,
126  *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
127  *  - sched_setnuma():		p->numa_preferred_nid
128  *  - sched_move_task()/
129  *    cpu_cgroup_fork():	p->sched_task_group
130  *  - uclamp_update_active()	p->uclamp*
131  *
132  * p->state <- TASK_*:
133  *
134  *   is changed locklessly using set_current_state(), __set_current_state() or
135  *   set_special_state(), see their respective comments, or by
136  *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
137  *   concurrent self.
138  *
139  * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
140  *
141  *   is set by activate_task() and cleared by deactivate_task(), under
142  *   rq->lock. Non-zero indicates the task is runnable, the special
143  *   ON_RQ_MIGRATING state is used for migration without holding both
144  *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
145  *
146  * p->on_cpu <- { 0, 1 }:
147  *
148  *   is set by prepare_task() and cleared by finish_task() such that it will be
149  *   set before p is scheduled-in and cleared after p is scheduled-out, both
150  *   under rq->lock. Non-zero indicates the task is running on its CPU.
151  *
152  *   [ The astute reader will observe that it is possible for two tasks on one
153  *     CPU to have ->on_cpu = 1 at the same time. ]
154  *
155  * task_cpu(p): is changed by set_task_cpu(), the rules are:
156  *
157  *  - Don't call set_task_cpu() on a blocked task:
158  *
159  *    We don't care what CPU we're not running on, this simplifies hotplug,
160  *    the CPU assignment of blocked tasks isn't required to be valid.
161  *
162  *  - for try_to_wake_up(), called under p->pi_lock:
163  *
164  *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
165  *
166  *  - for migration called under rq->lock:
167  *    [ see task_on_rq_migrating() in task_rq_lock() ]
168  *
169  *    o move_queued_task()
170  *    o detach_task()
171  *
172  *  - for migration called under double_rq_lock():
173  *
174  *    o __migrate_swap_task()
175  *    o push_rt_task() / pull_rt_task()
176  *    o push_dl_task() / pull_dl_task()
177  *    o dl_task_offline_migration()
178  *
179  */
180 
181 /*
182  * __task_rq_lock - lock the rq @p resides on.
183  */
__task_rq_lock(struct task_struct * p,struct rq_flags * rf)184 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
185 	__acquires(rq->lock)
186 {
187 	struct rq *rq;
188 
189 	lockdep_assert_held(&p->pi_lock);
190 
191 	for (;;) {
192 		rq = task_rq(p);
193 		raw_spin_lock(&rq->lock);
194 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
195 			rq_pin_lock(rq, rf);
196 			return rq;
197 		}
198 		raw_spin_unlock(&rq->lock);
199 
200 		while (unlikely(task_on_rq_migrating(p)))
201 			cpu_relax();
202 	}
203 }
204 
205 /*
206  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
207  */
task_rq_lock(struct task_struct * p,struct rq_flags * rf)208 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
209 	__acquires(p->pi_lock)
210 	__acquires(rq->lock)
211 {
212 	struct rq *rq;
213 
214 	for (;;) {
215 		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
216 		rq = task_rq(p);
217 		raw_spin_lock(&rq->lock);
218 		/*
219 		 *	move_queued_task()		task_rq_lock()
220 		 *
221 		 *	ACQUIRE (rq->lock)
222 		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
223 		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
224 		 *	[S] ->cpu = new_cpu		[L] task_rq()
225 		 *					[L] ->on_rq
226 		 *	RELEASE (rq->lock)
227 		 *
228 		 * If we observe the old CPU in task_rq_lock(), the acquire of
229 		 * the old rq->lock will fully serialize against the stores.
230 		 *
231 		 * If we observe the new CPU in task_rq_lock(), the address
232 		 * dependency headed by '[L] rq = task_rq()' and the acquire
233 		 * will pair with the WMB to ensure we then also see migrating.
234 		 */
235 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
236 			rq_pin_lock(rq, rf);
237 			return rq;
238 		}
239 		raw_spin_unlock(&rq->lock);
240 		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
241 
242 		while (unlikely(task_on_rq_migrating(p)))
243 			cpu_relax();
244 	}
245 }
246 
247 /*
248  * RQ-clock updating methods:
249  */
250 
update_rq_clock_task(struct rq * rq,s64 delta)251 static void update_rq_clock_task(struct rq *rq, s64 delta)
252 {
253 /*
254  * In theory, the compile should just see 0 here, and optimize out the call
255  * to sched_rt_avg_update. But I don't trust it...
256  */
257 	s64 __maybe_unused steal = 0, irq_delta = 0;
258 
259 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
260 	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
261 
262 	/*
263 	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
264 	 * this case when a previous update_rq_clock() happened inside a
265 	 * {soft,}irq region.
266 	 *
267 	 * When this happens, we stop ->clock_task and only update the
268 	 * prev_irq_time stamp to account for the part that fit, so that a next
269 	 * update will consume the rest. This ensures ->clock_task is
270 	 * monotonic.
271 	 *
272 	 * It does however cause some slight miss-attribution of {soft,}irq
273 	 * time, a more accurate solution would be to update the irq_time using
274 	 * the current rq->clock timestamp, except that would require using
275 	 * atomic ops.
276 	 */
277 	if (irq_delta > delta)
278 		irq_delta = delta;
279 
280 	rq->prev_irq_time += irq_delta;
281 	delta -= irq_delta;
282 #endif
283 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
284 	if (static_key_false((&paravirt_steal_rq_enabled))) {
285 		steal = paravirt_steal_clock(cpu_of(rq));
286 		steal -= rq->prev_steal_time_rq;
287 
288 		if (unlikely(steal > delta))
289 			steal = delta;
290 
291 		rq->prev_steal_time_rq += steal;
292 		delta -= steal;
293 	}
294 #endif
295 
296 	rq->clock_task += delta;
297 
298 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
299 	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
300 		update_irq_load_avg(rq, irq_delta + steal);
301 #endif
302 	update_rq_clock_pelt(rq, delta);
303 }
304 
update_rq_clock(struct rq * rq)305 void update_rq_clock(struct rq *rq)
306 {
307 	s64 delta;
308 
309 	lockdep_assert_held(&rq->lock);
310 
311 	if (rq->clock_update_flags & RQCF_ACT_SKIP)
312 		return;
313 
314 #ifdef CONFIG_SCHED_DEBUG
315 	if (sched_feat(WARN_DOUBLE_CLOCK))
316 		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
317 	rq->clock_update_flags |= RQCF_UPDATED;
318 #endif
319 
320 	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
321 	if (delta < 0)
322 		return;
323 	rq->clock += delta;
324 	update_rq_clock_task(rq, delta);
325 }
326 
327 static inline void
rq_csd_init(struct rq * rq,struct __call_single_data * csd,smp_call_func_t func)328 rq_csd_init(struct rq *rq, struct __call_single_data *csd, smp_call_func_t func)
329 {
330 	csd->flags = 0;
331 	csd->func = func;
332 	csd->info = rq;
333 }
334 
335 #ifdef CONFIG_SCHED_HRTICK
336 /*
337  * Use HR-timers to deliver accurate preemption points.
338  */
339 
hrtick_clear(struct rq * rq)340 static void hrtick_clear(struct rq *rq)
341 {
342 	if (hrtimer_active(&rq->hrtick_timer))
343 		hrtimer_cancel(&rq->hrtick_timer);
344 }
345 
346 /*
347  * High-resolution timer tick.
348  * Runs from hardirq context with interrupts disabled.
349  */
hrtick(struct hrtimer * timer)350 static enum hrtimer_restart hrtick(struct hrtimer *timer)
351 {
352 	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
353 	struct rq_flags rf;
354 
355 	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
356 
357 	rq_lock(rq, &rf);
358 	update_rq_clock(rq);
359 	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
360 	rq_unlock(rq, &rf);
361 
362 	return HRTIMER_NORESTART;
363 }
364 
365 #ifdef CONFIG_SMP
366 
__hrtick_restart(struct rq * rq)367 static void __hrtick_restart(struct rq *rq)
368 {
369 	struct hrtimer *timer = &rq->hrtick_timer;
370 	ktime_t time = rq->hrtick_time;
371 
372 	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
373 }
374 
375 /*
376  * called from hardirq (IPI) context
377  */
__hrtick_start(void * arg)378 static void __hrtick_start(void *arg)
379 {
380 	struct rq *rq = arg;
381 	struct rq_flags rf;
382 
383 	rq_lock(rq, &rf);
384 	__hrtick_restart(rq);
385 	rq_unlock(rq, &rf);
386 }
387 
388 /*
389  * Called to set the hrtick timer state.
390  *
391  * called with rq->lock held and irqs disabled
392  */
hrtick_start(struct rq * rq,u64 delay)393 void hrtick_start(struct rq *rq, u64 delay)
394 {
395 	struct hrtimer *timer = &rq->hrtick_timer;
396 	s64 delta;
397 
398 	/*
399 	 * Don't schedule slices shorter than 10000ns, that just
400 	 * doesn't make sense and can cause timer DoS.
401 	 */
402 	delta = max_t(s64, delay, 10000LL);
403 	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
404 
405 	if (rq == this_rq())
406 		__hrtick_restart(rq);
407 	else
408 		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
409 }
410 
411 #else
412 /*
413  * Called to set the hrtick timer state.
414  *
415  * called with rq->lock held and irqs disabled
416  */
hrtick_start(struct rq * rq,u64 delay)417 void hrtick_start(struct rq *rq, u64 delay)
418 {
419 	/*
420 	 * Don't schedule slices shorter than 10000ns, that just
421 	 * doesn't make sense. Rely on vruntime for fairness.
422 	 */
423 	delay = max_t(u64, delay, 10000LL);
424 	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
425 		      HRTIMER_MODE_REL_PINNED_HARD);
426 }
427 
428 #endif /* CONFIG_SMP */
429 
hrtick_rq_init(struct rq * rq)430 static void hrtick_rq_init(struct rq *rq)
431 {
432 #ifdef CONFIG_SMP
433 	rq_csd_init(rq, &rq->hrtick_csd, __hrtick_start);
434 #endif
435 	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
436 	rq->hrtick_timer.function = hrtick;
437 }
438 #else	/* CONFIG_SCHED_HRTICK */
hrtick_clear(struct rq * rq)439 static inline void hrtick_clear(struct rq *rq)
440 {
441 }
442 
hrtick_rq_init(struct rq * rq)443 static inline void hrtick_rq_init(struct rq *rq)
444 {
445 }
446 #endif	/* CONFIG_SCHED_HRTICK */
447 
448 /*
449  * cmpxchg based fetch_or, macro so it works for different integer types
450  */
451 #define fetch_or(ptr, mask)						\
452 	({								\
453 		typeof(ptr) _ptr = (ptr);				\
454 		typeof(mask) _mask = (mask);				\
455 		typeof(*_ptr) _old, _val = *_ptr;			\
456 									\
457 		for (;;) {						\
458 			_old = cmpxchg(_ptr, _val, _val | _mask);	\
459 			if (_old == _val)				\
460 				break;					\
461 			_val = _old;					\
462 		}							\
463 	_old;								\
464 })
465 
466 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
467 /*
468  * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
469  * this avoids any races wrt polling state changes and thereby avoids
470  * spurious IPIs.
471  */
set_nr_and_not_polling(struct task_struct * p)472 static bool set_nr_and_not_polling(struct task_struct *p)
473 {
474 	struct thread_info *ti = task_thread_info(p);
475 	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
476 }
477 
478 /*
479  * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
480  *
481  * If this returns true, then the idle task promises to call
482  * sched_ttwu_pending() and reschedule soon.
483  */
set_nr_if_polling(struct task_struct * p)484 static bool set_nr_if_polling(struct task_struct *p)
485 {
486 	struct thread_info *ti = task_thread_info(p);
487 	typeof(ti->flags) old, val = READ_ONCE(ti->flags);
488 
489 	for (;;) {
490 		if (!(val & _TIF_POLLING_NRFLAG))
491 			return false;
492 		if (val & _TIF_NEED_RESCHED)
493 			return true;
494 		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
495 		if (old == val)
496 			break;
497 		val = old;
498 	}
499 	return true;
500 }
501 
502 #else
set_nr_and_not_polling(struct task_struct * p)503 static bool set_nr_and_not_polling(struct task_struct *p)
504 {
505 	set_tsk_need_resched(p);
506 	return true;
507 }
508 
509 #ifdef CONFIG_SMP
set_nr_if_polling(struct task_struct * p)510 static bool set_nr_if_polling(struct task_struct *p)
511 {
512 	return false;
513 }
514 #endif
515 #endif
516 
__wake_q_add(struct wake_q_head * head,struct task_struct * task)517 static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
518 {
519 	struct wake_q_node *node = &task->wake_q;
520 
521 	/*
522 	 * Atomically grab the task, if ->wake_q is !nil already it means
523 	 * its already queued (either by us or someone else) and will get the
524 	 * wakeup due to that.
525 	 *
526 	 * In order to ensure that a pending wakeup will observe our pending
527 	 * state, even in the failed case, an explicit smp_mb() must be used.
528 	 */
529 	smp_mb__before_atomic();
530 	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
531 		return false;
532 
533 	/*
534 	 * The head is context local, there can be no concurrency.
535 	 */
536 	*head->lastp = node;
537 	head->lastp = &node->next;
538 	return true;
539 }
540 
541 /**
542  * wake_q_add() - queue a wakeup for 'later' waking.
543  * @head: the wake_q_head to add @task to
544  * @task: the task to queue for 'later' wakeup
545  *
546  * Queue a task for later wakeup, most likely by the wake_up_q() call in the
547  * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
548  * instantly.
549  *
550  * This function must be used as-if it were wake_up_process(); IOW the task
551  * must be ready to be woken at this location.
552  */
wake_q_add(struct wake_q_head * head,struct task_struct * task)553 void wake_q_add(struct wake_q_head *head, struct task_struct *task)
554 {
555 	if (__wake_q_add(head, task))
556 		get_task_struct(task);
557 }
558 
559 /**
560  * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
561  * @head: the wake_q_head to add @task to
562  * @task: the task to queue for 'later' wakeup
563  *
564  * Queue a task for later wakeup, most likely by the wake_up_q() call in the
565  * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
566  * instantly.
567  *
568  * This function must be used as-if it were wake_up_process(); IOW the task
569  * must be ready to be woken at this location.
570  *
571  * This function is essentially a task-safe equivalent to wake_q_add(). Callers
572  * that already hold reference to @task can call the 'safe' version and trust
573  * wake_q to do the right thing depending whether or not the @task is already
574  * queued for wakeup.
575  */
wake_q_add_safe(struct wake_q_head * head,struct task_struct * task)576 void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
577 {
578 	if (!__wake_q_add(head, task))
579 		put_task_struct(task);
580 }
581 
wake_up_q(struct wake_q_head * head)582 void wake_up_q(struct wake_q_head *head)
583 {
584 	struct wake_q_node *node = head->first;
585 
586 	while (node != WAKE_Q_TAIL) {
587 		struct task_struct *task;
588 
589 		task = container_of(node, struct task_struct, wake_q);
590 		BUG_ON(!task);
591 		/* Task can safely be re-inserted now: */
592 		node = node->next;
593 		task->wake_q.next = NULL;
594 
595 		/*
596 		 * wake_up_process() executes a full barrier, which pairs with
597 		 * the queueing in wake_q_add() so as not to miss wakeups.
598 		 */
599 		wake_up_process(task);
600 		put_task_struct(task);
601 	}
602 }
603 
604 /*
605  * resched_curr - mark rq's current task 'to be rescheduled now'.
606  *
607  * On UP this means the setting of the need_resched flag, on SMP it
608  * might also involve a cross-CPU call to trigger the scheduler on
609  * the target CPU.
610  */
resched_curr(struct rq * rq)611 void resched_curr(struct rq *rq)
612 {
613 	struct task_struct *curr = rq->curr;
614 	int cpu;
615 
616 	lockdep_assert_held(&rq->lock);
617 
618 	if (test_tsk_need_resched(curr))
619 		return;
620 
621 	cpu = cpu_of(rq);
622 
623 	if (cpu == smp_processor_id()) {
624 		set_tsk_need_resched(curr);
625 		set_preempt_need_resched();
626 		return;
627 	}
628 
629 	if (set_nr_and_not_polling(curr))
630 		smp_send_reschedule(cpu);
631 	else
632 		trace_sched_wake_idle_without_ipi(cpu);
633 }
634 
resched_cpu(int cpu)635 void resched_cpu(int cpu)
636 {
637 	struct rq *rq = cpu_rq(cpu);
638 	unsigned long flags;
639 
640 	raw_spin_lock_irqsave(&rq->lock, flags);
641 	if (cpu_online(cpu) || cpu == smp_processor_id())
642 		resched_curr(rq);
643 	raw_spin_unlock_irqrestore(&rq->lock, flags);
644 }
645 
646 #ifdef CONFIG_SMP
647 #ifdef CONFIG_NO_HZ_COMMON
648 /*
649  * In the semi idle case, use the nearest busy CPU for migrating timers
650  * from an idle CPU.  This is good for power-savings.
651  *
652  * We don't do similar optimization for completely idle system, as
653  * selecting an idle CPU will add more delays to the timers than intended
654  * (as that CPU's timer base may not be uptodate wrt jiffies etc).
655  */
get_nohz_timer_target(void)656 int get_nohz_timer_target(void)
657 {
658 	int i, cpu = smp_processor_id(), default_cpu = -1;
659 	struct sched_domain *sd;
660 
661 	if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
662 		if (!idle_cpu(cpu))
663 			return cpu;
664 		default_cpu = cpu;
665 	}
666 
667 	rcu_read_lock();
668 	for_each_domain(cpu, sd) {
669 		for_each_cpu_and(i, sched_domain_span(sd),
670 			housekeeping_cpumask(HK_FLAG_TIMER)) {
671 			if (cpu == i)
672 				continue;
673 
674 			if (!idle_cpu(i)) {
675 				cpu = i;
676 				goto unlock;
677 			}
678 		}
679 	}
680 
681 	if (default_cpu == -1)
682 		default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
683 	cpu = default_cpu;
684 unlock:
685 	rcu_read_unlock();
686 	return cpu;
687 }
688 
689 /*
690  * When add_timer_on() enqueues a timer into the timer wheel of an
691  * idle CPU then this timer might expire before the next timer event
692  * which is scheduled to wake up that CPU. In case of a completely
693  * idle system the next event might even be infinite time into the
694  * future. wake_up_idle_cpu() ensures that the CPU is woken up and
695  * leaves the inner idle loop so the newly added timer is taken into
696  * account when the CPU goes back to idle and evaluates the timer
697  * wheel for the next timer event.
698  */
wake_up_idle_cpu(int cpu)699 static void wake_up_idle_cpu(int cpu)
700 {
701 	struct rq *rq = cpu_rq(cpu);
702 
703 	if (cpu == smp_processor_id())
704 		return;
705 
706 	if (set_nr_and_not_polling(rq->idle))
707 		smp_send_reschedule(cpu);
708 	else
709 		trace_sched_wake_idle_without_ipi(cpu);
710 }
711 
wake_up_full_nohz_cpu(int cpu)712 static bool wake_up_full_nohz_cpu(int cpu)
713 {
714 	/*
715 	 * We just need the target to call irq_exit() and re-evaluate
716 	 * the next tick. The nohz full kick at least implies that.
717 	 * If needed we can still optimize that later with an
718 	 * empty IRQ.
719 	 */
720 	if (cpu_is_offline(cpu))
721 		return true;  /* Don't try to wake offline CPUs. */
722 	if (tick_nohz_full_cpu(cpu)) {
723 		if (cpu != smp_processor_id() ||
724 		    tick_nohz_tick_stopped())
725 			tick_nohz_full_kick_cpu(cpu);
726 		return true;
727 	}
728 
729 	return false;
730 }
731 
732 /*
733  * Wake up the specified CPU.  If the CPU is going offline, it is the
734  * caller's responsibility to deal with the lost wakeup, for example,
735  * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
736  */
wake_up_nohz_cpu(int cpu)737 void wake_up_nohz_cpu(int cpu)
738 {
739 	if (!wake_up_full_nohz_cpu(cpu))
740 		wake_up_idle_cpu(cpu);
741 }
742 
nohz_csd_func(void * info)743 static void nohz_csd_func(void *info)
744 {
745 	struct rq *rq = info;
746 	int cpu = cpu_of(rq);
747 	unsigned int flags;
748 
749 	/*
750 	 * Release the rq::nohz_csd.
751 	 */
752 	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
753 	WARN_ON(!(flags & NOHZ_KICK_MASK));
754 
755 	rq->idle_balance = idle_cpu(cpu);
756 	if (rq->idle_balance && !need_resched()) {
757 		rq->nohz_idle_balance = flags;
758 		raise_softirq_irqoff(SCHED_SOFTIRQ);
759 	}
760 }
761 
762 #endif /* CONFIG_NO_HZ_COMMON */
763 
764 #ifdef CONFIG_NO_HZ_FULL
sched_can_stop_tick(struct rq * rq)765 bool sched_can_stop_tick(struct rq *rq)
766 {
767 	int fifo_nr_running;
768 
769 	/* Deadline tasks, even if single, need the tick */
770 	if (rq->dl.dl_nr_running)
771 		return false;
772 
773 	/*
774 	 * If there are more than one RR tasks, we need the tick to effect the
775 	 * actual RR behaviour.
776 	 */
777 	if (rq->rt.rr_nr_running) {
778 		if (rq->rt.rr_nr_running == 1)
779 			return true;
780 		else
781 			return false;
782 	}
783 
784 	/*
785 	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
786 	 * forced preemption between FIFO tasks.
787 	 */
788 	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
789 	if (fifo_nr_running)
790 		return true;
791 
792 	/*
793 	 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
794 	 * if there's more than one we need the tick for involuntary
795 	 * preemption.
796 	 */
797 	if (rq->nr_running > 1)
798 		return false;
799 
800 	return true;
801 }
802 #endif /* CONFIG_NO_HZ_FULL */
803 #endif /* CONFIG_SMP */
804 
805 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
806 			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
807 /*
808  * Iterate task_group tree rooted at *from, calling @down when first entering a
809  * node and @up when leaving it for the final time.
810  *
811  * Caller must hold rcu_lock or sufficient equivalent.
812  */
walk_tg_tree_from(struct task_group * from,tg_visitor down,tg_visitor up,void * data)813 int walk_tg_tree_from(struct task_group *from,
814 			     tg_visitor down, tg_visitor up, void *data)
815 {
816 	struct task_group *parent, *child;
817 	int ret;
818 
819 	parent = from;
820 
821 down:
822 	ret = (*down)(parent, data);
823 	if (ret)
824 		goto out;
825 	list_for_each_entry_rcu(child, &parent->children, siblings) {
826 		parent = child;
827 		goto down;
828 
829 up:
830 		continue;
831 	}
832 	ret = (*up)(parent, data);
833 	if (ret || parent == from)
834 		goto out;
835 
836 	child = parent;
837 	parent = parent->parent;
838 	if (parent)
839 		goto up;
840 out:
841 	return ret;
842 }
843 
tg_nop(struct task_group * tg,void * data)844 int tg_nop(struct task_group *tg, void *data)
845 {
846 	return 0;
847 }
848 #endif
849 
set_load_weight(struct task_struct * p,bool update_load)850 static void set_load_weight(struct task_struct *p, bool update_load)
851 {
852 	int prio = p->static_prio - MAX_RT_PRIO;
853 	struct load_weight *load = &p->se.load;
854 
855 	/*
856 	 * SCHED_IDLE tasks get minimal weight:
857 	 */
858 	if (task_has_idle_policy(p)) {
859 		load->weight = scale_load(WEIGHT_IDLEPRIO);
860 		load->inv_weight = WMULT_IDLEPRIO;
861 		return;
862 	}
863 
864 	/*
865 	 * SCHED_OTHER tasks have to update their load when changing their
866 	 * weight
867 	 */
868 	if (update_load && p->sched_class == &fair_sched_class) {
869 		reweight_task(p, prio);
870 	} else {
871 		load->weight = scale_load(sched_prio_to_weight[prio]);
872 		load->inv_weight = sched_prio_to_wmult[prio];
873 	}
874 }
875 
876 #ifdef CONFIG_SCHED_LATENCY_NICE
set_latency_weight(struct task_struct * p)877 static void set_latency_weight(struct task_struct *p)
878 {
879 	p->se.latency_weight = sched_latency_to_weight[p->latency_prio];
880 }
881 
__setscheduler_latency(struct task_struct * p,const struct sched_attr * attr)882 static void __setscheduler_latency(struct task_struct *p,
883 		const struct sched_attr *attr)
884 {
885 	if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE) {
886 		p->latency_prio = NICE_TO_LATENCY(attr->sched_latency_nice);
887 		set_latency_weight(p);
888 	}
889 }
890 
latency_nice_validate(struct task_struct * p,bool user,const struct sched_attr * attr)891 static int latency_nice_validate(struct task_struct *p, bool user,
892 				 const struct sched_attr *attr)
893 {
894 	if (attr->sched_latency_nice > MAX_LATENCY_NICE)
895 		return -EINVAL;
896 	if (attr->sched_latency_nice < MIN_LATENCY_NICE)
897 		return -EINVAL;
898 	/* Use the same security checks as NICE */
899 	if (user && attr->sched_latency_nice < LATENCY_TO_NICE(p->latency_prio)
900 	    && !capable(CAP_SYS_NICE))
901 		return -EPERM;
902 
903 	return 0;
904 }
905 #else
906 static void
__setscheduler_latency(struct task_struct * p,const struct sched_attr * attr)907 __setscheduler_latency(struct task_struct *p, const struct sched_attr *attr)
908 {
909 }
910 
911 static inline
latency_nice_validate(struct task_struct * p,bool user,const struct sched_attr * attr)912 int latency_nice_validate(struct task_struct *p, bool user,
913 			  const struct sched_attr *attr)
914 {
915 	return -EOPNOTSUPP;
916 }
917 #endif
918 
919 #ifdef CONFIG_UCLAMP_TASK
920 /*
921  * Serializes updates of utilization clamp values
922  *
923  * The (slow-path) user-space triggers utilization clamp value updates which
924  * can require updates on (fast-path) scheduler's data structures used to
925  * support enqueue/dequeue operations.
926  * While the per-CPU rq lock protects fast-path update operations, user-space
927  * requests are serialized using a mutex to reduce the risk of conflicting
928  * updates or API abuses.
929  */
930 static DEFINE_MUTEX(uclamp_mutex);
931 
932 /* Max allowed minimum utilization */
933 unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
934 
935 /* Max allowed maximum utilization */
936 unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
937 
938 /*
939  * By default RT tasks run at the maximum performance point/capacity of the
940  * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
941  * SCHED_CAPACITY_SCALE.
942  *
943  * This knob allows admins to change the default behavior when uclamp is being
944  * used. In battery powered devices, particularly, running at the maximum
945  * capacity and frequency will increase energy consumption and shorten the
946  * battery life.
947  *
948  * This knob only affects RT tasks that their uclamp_se->user_defined == false.
949  *
950  * This knob will not override the system default sched_util_clamp_min defined
951  * above.
952  */
953 unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
954 
955 /* All clamps are required to be less or equal than these values */
956 static struct uclamp_se uclamp_default[UCLAMP_CNT];
957 
958 /*
959  * This static key is used to reduce the uclamp overhead in the fast path. It
960  * primarily disables the call to uclamp_rq_{inc, dec}() in
961  * enqueue/dequeue_task().
962  *
963  * This allows users to continue to enable uclamp in their kernel config with
964  * minimum uclamp overhead in the fast path.
965  *
966  * As soon as userspace modifies any of the uclamp knobs, the static key is
967  * enabled, since we have an actual users that make use of uclamp
968  * functionality.
969  *
970  * The knobs that would enable this static key are:
971  *
972  *   * A task modifying its uclamp value with sched_setattr().
973  *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
974  *   * An admin modifying the cgroup cpu.uclamp.{min, max}
975  */
976 DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
977 
978 /* Integer rounded range for each bucket */
979 #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
980 
981 #define for_each_clamp_id(clamp_id) \
982 	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)
983 
uclamp_bucket_id(unsigned int clamp_value)984 static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
985 {
986 	return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
987 }
988 
uclamp_none(enum uclamp_id clamp_id)989 static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
990 {
991 	if (clamp_id == UCLAMP_MIN)
992 		return 0;
993 	return SCHED_CAPACITY_SCALE;
994 }
995 
uclamp_se_set(struct uclamp_se * uc_se,unsigned int value,bool user_defined)996 static inline void uclamp_se_set(struct uclamp_se *uc_se,
997 				 unsigned int value, bool user_defined)
998 {
999 	uc_se->value = value;
1000 	uc_se->bucket_id = uclamp_bucket_id(value);
1001 	uc_se->user_defined = user_defined;
1002 }
1003 
1004 static inline unsigned int
uclamp_idle_value(struct rq * rq,enum uclamp_id clamp_id,unsigned int clamp_value)1005 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
1006 		  unsigned int clamp_value)
1007 {
1008 	/*
1009 	 * Avoid blocked utilization pushing up the frequency when we go
1010 	 * idle (which drops the max-clamp) by retaining the last known
1011 	 * max-clamp.
1012 	 */
1013 	if (clamp_id == UCLAMP_MAX) {
1014 		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
1015 		return clamp_value;
1016 	}
1017 
1018 	return uclamp_none(UCLAMP_MIN);
1019 }
1020 
uclamp_idle_reset(struct rq * rq,enum uclamp_id clamp_id,unsigned int clamp_value)1021 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
1022 				     unsigned int clamp_value)
1023 {
1024 	/* Reset max-clamp retention only on idle exit */
1025 	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1026 		return;
1027 
1028 	WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
1029 }
1030 
1031 static inline
uclamp_rq_max_value(struct rq * rq,enum uclamp_id clamp_id,unsigned int clamp_value)1032 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
1033 				   unsigned int clamp_value)
1034 {
1035 	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
1036 	int bucket_id = UCLAMP_BUCKETS - 1;
1037 
1038 	/*
1039 	 * Since both min and max clamps are max aggregated, find the
1040 	 * top most bucket with tasks in.
1041 	 */
1042 	for ( ; bucket_id >= 0; bucket_id--) {
1043 		if (!bucket[bucket_id].tasks)
1044 			continue;
1045 		return bucket[bucket_id].value;
1046 	}
1047 
1048 	/* No tasks -- default clamp values */
1049 	return uclamp_idle_value(rq, clamp_id, clamp_value);
1050 }
1051 
__uclamp_update_util_min_rt_default(struct task_struct * p)1052 static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1053 {
1054 	unsigned int default_util_min;
1055 	struct uclamp_se *uc_se;
1056 
1057 	lockdep_assert_held(&p->pi_lock);
1058 
1059 	uc_se = &p->uclamp_req[UCLAMP_MIN];
1060 
1061 	/* Only sync if user didn't override the default */
1062 	if (uc_se->user_defined)
1063 		return;
1064 
1065 	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1066 	uclamp_se_set(uc_se, default_util_min, false);
1067 }
1068 
uclamp_update_util_min_rt_default(struct task_struct * p)1069 static void uclamp_update_util_min_rt_default(struct task_struct *p)
1070 {
1071 	struct rq_flags rf;
1072 	struct rq *rq;
1073 
1074 	if (!rt_task(p))
1075 		return;
1076 
1077 	/* Protect updates to p->uclamp_* */
1078 	rq = task_rq_lock(p, &rf);
1079 	__uclamp_update_util_min_rt_default(p);
1080 	task_rq_unlock(rq, p, &rf);
1081 }
1082 
uclamp_sync_util_min_rt_default(void)1083 static void uclamp_sync_util_min_rt_default(void)
1084 {
1085 	struct task_struct *g, *p;
1086 
1087 	/*
1088 	 * copy_process()			sysctl_uclamp
1089 	 *					  uclamp_min_rt = X;
1090 	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
1091 	 *   // link thread			  smp_mb__after_spinlock()
1092 	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
1093 	 *   sched_post_fork()			  for_each_process_thread()
1094 	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
1095 	 *
1096 	 * Ensures that either sched_post_fork() will observe the new
1097 	 * uclamp_min_rt or for_each_process_thread() will observe the new
1098 	 * task.
1099 	 */
1100 	read_lock(&tasklist_lock);
1101 	smp_mb__after_spinlock();
1102 	read_unlock(&tasklist_lock);
1103 
1104 	rcu_read_lock();
1105 	for_each_process_thread(g, p)
1106 		uclamp_update_util_min_rt_default(p);
1107 	rcu_read_unlock();
1108 }
1109 
1110 static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct * p,enum uclamp_id clamp_id)1111 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
1112 {
1113 	/* Copy by value as we could modify it */
1114 	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1115 #ifdef CONFIG_UCLAMP_TASK_GROUP
1116 	unsigned int tg_min, tg_max, value;
1117 
1118 	/*
1119 	 * Tasks in autogroups or root task group will be
1120 	 * restricted by system defaults.
1121 	 */
1122 	if (task_group_is_autogroup(task_group(p)))
1123 		return uc_req;
1124 	if (task_group(p) == &root_task_group)
1125 		return uc_req;
1126 
1127 	tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
1128 	tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
1129 	value = uc_req.value;
1130 	value = clamp(value, tg_min, tg_max);
1131 	uclamp_se_set(&uc_req, value, false);
1132 #endif
1133 
1134 	return uc_req;
1135 }
1136 
1137 /*
1138  * The effective clamp bucket index of a task depends on, by increasing
1139  * priority:
1140  * - the task specific clamp value, when explicitly requested from userspace
1141  * - the task group effective clamp value, for tasks not either in the root
1142  *   group or in an autogroup
1143  * - the system default clamp value, defined by the sysadmin
1144  */
1145 static inline struct uclamp_se
uclamp_eff_get(struct task_struct * p,enum uclamp_id clamp_id)1146 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
1147 {
1148 	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
1149 	struct uclamp_se uc_max = uclamp_default[clamp_id];
1150 
1151 	/* System default restrictions always apply */
1152 	if (unlikely(uc_req.value > uc_max.value))
1153 		return uc_max;
1154 
1155 	return uc_req;
1156 }
1157 
uclamp_eff_value(struct task_struct * p,enum uclamp_id clamp_id)1158 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
1159 {
1160 	struct uclamp_se uc_eff;
1161 
1162 	/* Task currently refcounted: use back-annotated (effective) value */
1163 	if (p->uclamp[clamp_id].active)
1164 		return (unsigned long)p->uclamp[clamp_id].value;
1165 
1166 	uc_eff = uclamp_eff_get(p, clamp_id);
1167 
1168 	return (unsigned long)uc_eff.value;
1169 }
1170 
1171 /*
1172  * When a task is enqueued on a rq, the clamp bucket currently defined by the
1173  * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1174  * updates the rq's clamp value if required.
1175  *
1176  * Tasks can have a task-specific value requested from user-space, track
1177  * within each bucket the maximum value for tasks refcounted in it.
1178  * This "local max aggregation" allows to track the exact "requested" value
1179  * for each bucket when all its RUNNABLE tasks require the same clamp.
1180  */
uclamp_rq_inc_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id)1181 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
1182 				    enum uclamp_id clamp_id)
1183 {
1184 	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1185 	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1186 	struct uclamp_bucket *bucket;
1187 
1188 	lockdep_assert_held(&rq->lock);
1189 
1190 	/* Update task effective clamp */
1191 	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1192 
1193 	bucket = &uc_rq->bucket[uc_se->bucket_id];
1194 	bucket->tasks++;
1195 	uc_se->active = true;
1196 
1197 	uclamp_idle_reset(rq, clamp_id, uc_se->value);
1198 
1199 	/*
1200 	 * Local max aggregation: rq buckets always track the max
1201 	 * "requested" clamp value of its RUNNABLE tasks.
1202 	 */
1203 	if (bucket->tasks == 1 || uc_se->value > bucket->value)
1204 		bucket->value = uc_se->value;
1205 
1206 	if (uc_se->value > READ_ONCE(uc_rq->value))
1207 		WRITE_ONCE(uc_rq->value, uc_se->value);
1208 }
1209 
1210 /*
1211  * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1212  * is released. If this is the last task reference counting the rq's max
1213  * active clamp value, then the rq's clamp value is updated.
1214  *
1215  * Both refcounted tasks and rq's cached clamp values are expected to be
1216  * always valid. If it's detected they are not, as defensive programming,
1217  * enforce the expected state and warn.
1218  */
uclamp_rq_dec_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id)1219 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1220 				    enum uclamp_id clamp_id)
1221 {
1222 	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1223 	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1224 	struct uclamp_bucket *bucket;
1225 	unsigned int bkt_clamp;
1226 	unsigned int rq_clamp;
1227 
1228 	lockdep_assert_held(&rq->lock);
1229 
1230 	/*
1231 	 * If sched_uclamp_used was enabled after task @p was enqueued,
1232 	 * we could end up with unbalanced call to uclamp_rq_dec_id().
1233 	 *
1234 	 * In this case the uc_se->active flag should be false since no uclamp
1235 	 * accounting was performed at enqueue time and we can just return
1236 	 * here.
1237 	 *
1238 	 * Need to be careful of the following enqeueue/dequeue ordering
1239 	 * problem too
1240 	 *
1241 	 *	enqueue(taskA)
1242 	 *	// sched_uclamp_used gets enabled
1243 	 *	enqueue(taskB)
1244 	 *	dequeue(taskA)
1245 	 *	// Must not decrement bukcet->tasks here
1246 	 *	dequeue(taskB)
1247 	 *
1248 	 * where we could end up with stale data in uc_se and
1249 	 * bucket[uc_se->bucket_id].
1250 	 *
1251 	 * The following check here eliminates the possibility of such race.
1252 	 */
1253 	if (unlikely(!uc_se->active))
1254 		return;
1255 
1256 	bucket = &uc_rq->bucket[uc_se->bucket_id];
1257 
1258 	SCHED_WARN_ON(!bucket->tasks);
1259 	if (likely(bucket->tasks))
1260 		bucket->tasks--;
1261 
1262 	uc_se->active = false;
1263 
1264 	/*
1265 	 * Keep "local max aggregation" simple and accept to (possibly)
1266 	 * overboost some RUNNABLE tasks in the same bucket.
1267 	 * The rq clamp bucket value is reset to its base value whenever
1268 	 * there are no more RUNNABLE tasks refcounting it.
1269 	 */
1270 	if (likely(bucket->tasks))
1271 		return;
1272 
1273 	rq_clamp = READ_ONCE(uc_rq->value);
1274 	/*
1275 	 * Defensive programming: this should never happen. If it happens,
1276 	 * e.g. due to future modification, warn and fixup the expected value.
1277 	 */
1278 	SCHED_WARN_ON(bucket->value > rq_clamp);
1279 	if (bucket->value >= rq_clamp) {
1280 		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1281 		WRITE_ONCE(uc_rq->value, bkt_clamp);
1282 	}
1283 }
1284 
uclamp_rq_inc(struct rq * rq,struct task_struct * p)1285 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
1286 {
1287 	enum uclamp_id clamp_id;
1288 
1289 	/*
1290 	 * Avoid any overhead until uclamp is actually used by the userspace.
1291 	 *
1292 	 * The condition is constructed such that a NOP is generated when
1293 	 * sched_uclamp_used is disabled.
1294 	 */
1295 	if (!static_branch_unlikely(&sched_uclamp_used))
1296 		return;
1297 
1298 	if (unlikely(!p->sched_class->uclamp_enabled))
1299 		return;
1300 
1301 	for_each_clamp_id(clamp_id)
1302 		uclamp_rq_inc_id(rq, p, clamp_id);
1303 
1304 	/* Reset clamp idle holding when there is one RUNNABLE task */
1305 	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1306 		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1307 }
1308 
uclamp_rq_dec(struct rq * rq,struct task_struct * p)1309 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1310 {
1311 	enum uclamp_id clamp_id;
1312 
1313 	/*
1314 	 * Avoid any overhead until uclamp is actually used by the userspace.
1315 	 *
1316 	 * The condition is constructed such that a NOP is generated when
1317 	 * sched_uclamp_used is disabled.
1318 	 */
1319 	if (!static_branch_unlikely(&sched_uclamp_used))
1320 		return;
1321 
1322 	if (unlikely(!p->sched_class->uclamp_enabled))
1323 		return;
1324 
1325 	for_each_clamp_id(clamp_id)
1326 		uclamp_rq_dec_id(rq, p, clamp_id);
1327 }
1328 
uclamp_rq_reinc_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id)1329 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1330 				      enum uclamp_id clamp_id)
1331 {
1332 	if (!p->uclamp[clamp_id].active)
1333 		return;
1334 
1335 	uclamp_rq_dec_id(rq, p, clamp_id);
1336 	uclamp_rq_inc_id(rq, p, clamp_id);
1337 
1338 	/*
1339 	 * Make sure to clear the idle flag if we've transiently reached 0
1340 	 * active tasks on rq.
1341 	 */
1342 	if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1343 		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1344 }
1345 
1346 static inline void
uclamp_update_active(struct task_struct * p)1347 uclamp_update_active(struct task_struct *p)
1348 {
1349 	enum uclamp_id clamp_id;
1350 	struct rq_flags rf;
1351 	struct rq *rq;
1352 
1353 	/*
1354 	 * Lock the task and the rq where the task is (or was) queued.
1355 	 *
1356 	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1357 	 * price to pay to safely serialize util_{min,max} updates with
1358 	 * enqueues, dequeues and migration operations.
1359 	 * This is the same locking schema used by __set_cpus_allowed_ptr().
1360 	 */
1361 	rq = task_rq_lock(p, &rf);
1362 
1363 	/*
1364 	 * Setting the clamp bucket is serialized by task_rq_lock().
1365 	 * If the task is not yet RUNNABLE and its task_struct is not
1366 	 * affecting a valid clamp bucket, the next time it's enqueued,
1367 	 * it will already see the updated clamp bucket value.
1368 	 */
1369 	for_each_clamp_id(clamp_id)
1370 		uclamp_rq_reinc_id(rq, p, clamp_id);
1371 
1372 	task_rq_unlock(rq, p, &rf);
1373 }
1374 
1375 #ifdef CONFIG_UCLAMP_TASK_GROUP
1376 static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state * css)1377 uclamp_update_active_tasks(struct cgroup_subsys_state *css)
1378 {
1379 	struct css_task_iter it;
1380 	struct task_struct *p;
1381 
1382 	css_task_iter_start(css, 0, &it);
1383 	while ((p = css_task_iter_next(&it)))
1384 		uclamp_update_active(p);
1385 	css_task_iter_end(&it);
1386 }
1387 
1388 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
uclamp_update_root_tg(void)1389 static void uclamp_update_root_tg(void)
1390 {
1391 	struct task_group *tg = &root_task_group;
1392 
1393 	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1394 		      sysctl_sched_uclamp_util_min, false);
1395 	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1396 		      sysctl_sched_uclamp_util_max, false);
1397 
1398 	rcu_read_lock();
1399 	cpu_util_update_eff(&root_task_group.css);
1400 	rcu_read_unlock();
1401 }
1402 #else
uclamp_update_root_tg(void)1403 static void uclamp_update_root_tg(void) { }
1404 #endif
1405 
sysctl_sched_uclamp_handler(struct ctl_table * table,int write,void * buffer,size_t * lenp,loff_t * ppos)1406 int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
1407 				void *buffer, size_t *lenp, loff_t *ppos)
1408 {
1409 	bool update_root_tg = false;
1410 	int old_min, old_max, old_min_rt;
1411 	int result;
1412 
1413 	mutex_lock(&uclamp_mutex);
1414 	old_min = sysctl_sched_uclamp_util_min;
1415 	old_max = sysctl_sched_uclamp_util_max;
1416 	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
1417 
1418 	result = proc_dointvec(table, write, buffer, lenp, ppos);
1419 	if (result)
1420 		goto undo;
1421 	if (!write)
1422 		goto done;
1423 
1424 	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
1425 	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE	||
1426 	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1427 
1428 		result = -EINVAL;
1429 		goto undo;
1430 	}
1431 
1432 	if (old_min != sysctl_sched_uclamp_util_min) {
1433 		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
1434 			      sysctl_sched_uclamp_util_min, false);
1435 		update_root_tg = true;
1436 	}
1437 	if (old_max != sysctl_sched_uclamp_util_max) {
1438 		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
1439 			      sysctl_sched_uclamp_util_max, false);
1440 		update_root_tg = true;
1441 	}
1442 
1443 	if (update_root_tg) {
1444 		static_branch_enable(&sched_uclamp_used);
1445 		uclamp_update_root_tg();
1446 	}
1447 
1448 	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1449 		static_branch_enable(&sched_uclamp_used);
1450 		uclamp_sync_util_min_rt_default();
1451 	}
1452 
1453 	/*
1454 	 * We update all RUNNABLE tasks only when task groups are in use.
1455 	 * Otherwise, keep it simple and do just a lazy update at each next
1456 	 * task enqueue time.
1457 	 */
1458 
1459 	goto done;
1460 
1461 undo:
1462 	sysctl_sched_uclamp_util_min = old_min;
1463 	sysctl_sched_uclamp_util_max = old_max;
1464 	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
1465 done:
1466 	mutex_unlock(&uclamp_mutex);
1467 
1468 	return result;
1469 }
1470 
uclamp_validate(struct task_struct * p,const struct sched_attr * attr)1471 static int uclamp_validate(struct task_struct *p,
1472 			   const struct sched_attr *attr)
1473 {
1474 	unsigned int lower_bound = p->uclamp_req[UCLAMP_MIN].value;
1475 	unsigned int upper_bound = p->uclamp_req[UCLAMP_MAX].value;
1476 
1477 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN)
1478 		lower_bound = attr->sched_util_min;
1479 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX)
1480 		upper_bound = attr->sched_util_max;
1481 
1482 	if (lower_bound > upper_bound)
1483 		return -EINVAL;
1484 	if (upper_bound > SCHED_CAPACITY_SCALE)
1485 		return -EINVAL;
1486 
1487 	/*
1488 	 * We have valid uclamp attributes; make sure uclamp is enabled.
1489 	 *
1490 	 * We need to do that here, because enabling static branches is a
1491 	 * blocking operation which obviously cannot be done while holding
1492 	 * scheduler locks.
1493 	 */
1494 	static_branch_enable(&sched_uclamp_used);
1495 
1496 	return 0;
1497 }
1498 
__setscheduler_uclamp(struct task_struct * p,const struct sched_attr * attr)1499 static void __setscheduler_uclamp(struct task_struct *p,
1500 				  const struct sched_attr *attr)
1501 {
1502 	enum uclamp_id clamp_id;
1503 
1504 	/*
1505 	 * On scheduling class change, reset to default clamps for tasks
1506 	 * without a task-specific value.
1507 	 */
1508 	for_each_clamp_id(clamp_id) {
1509 		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
1510 
1511 		/* Keep using defined clamps across class changes */
1512 		if (uc_se->user_defined)
1513 			continue;
1514 
1515 		/*
1516 		 * RT by default have a 100% boost value that could be modified
1517 		 * at runtime.
1518 		 */
1519 		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
1520 			__uclamp_update_util_min_rt_default(p);
1521 		else
1522 			uclamp_se_set(uc_se, uclamp_none(clamp_id), false);
1523 
1524 	}
1525 
1526 	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
1527 		return;
1528 
1529 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
1530 		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
1531 			      attr->sched_util_min, true);
1532 	}
1533 
1534 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
1535 		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
1536 			      attr->sched_util_max, true);
1537 	}
1538 }
1539 
uclamp_fork(struct task_struct * p)1540 static void uclamp_fork(struct task_struct *p)
1541 {
1542 	enum uclamp_id clamp_id;
1543 
1544 	/*
1545 	 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1546 	 * as the task is still at its early fork stages.
1547 	 */
1548 	for_each_clamp_id(clamp_id)
1549 		p->uclamp[clamp_id].active = false;
1550 
1551 	if (likely(!p->sched_reset_on_fork))
1552 		return;
1553 
1554 	for_each_clamp_id(clamp_id) {
1555 		uclamp_se_set(&p->uclamp_req[clamp_id],
1556 			      uclamp_none(clamp_id), false);
1557 	}
1558 }
1559 
uclamp_post_fork(struct task_struct * p)1560 static void uclamp_post_fork(struct task_struct *p)
1561 {
1562 	uclamp_update_util_min_rt_default(p);
1563 }
1564 
init_uclamp_rq(struct rq * rq)1565 static void __init init_uclamp_rq(struct rq *rq)
1566 {
1567 	enum uclamp_id clamp_id;
1568 	struct uclamp_rq *uc_rq = rq->uclamp;
1569 
1570 	for_each_clamp_id(clamp_id) {
1571 		uc_rq[clamp_id] = (struct uclamp_rq) {
1572 			.value = uclamp_none(clamp_id)
1573 		};
1574 	}
1575 
1576 	rq->uclamp_flags = UCLAMP_FLAG_IDLE;
1577 }
1578 
init_uclamp(void)1579 static void __init init_uclamp(void)
1580 {
1581 	struct uclamp_se uc_max = {};
1582 	enum uclamp_id clamp_id;
1583 	int cpu;
1584 
1585 	for_each_possible_cpu(cpu)
1586 		init_uclamp_rq(cpu_rq(cpu));
1587 
1588 	for_each_clamp_id(clamp_id) {
1589 		uclamp_se_set(&init_task.uclamp_req[clamp_id],
1590 			      uclamp_none(clamp_id), false);
1591 	}
1592 
1593 	/* System defaults allow max clamp values for both indexes */
1594 	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
1595 	for_each_clamp_id(clamp_id) {
1596 		uclamp_default[clamp_id] = uc_max;
1597 #ifdef CONFIG_UCLAMP_TASK_GROUP
1598 		root_task_group.uclamp_req[clamp_id] = uc_max;
1599 		root_task_group.uclamp[clamp_id] = uc_max;
1600 #endif
1601 	}
1602 }
1603 
1604 #else /* CONFIG_UCLAMP_TASK */
uclamp_rq_inc(struct rq * rq,struct task_struct * p)1605 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
uclamp_rq_dec(struct rq * rq,struct task_struct * p)1606 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
uclamp_validate(struct task_struct * p,const struct sched_attr * attr)1607 static inline int uclamp_validate(struct task_struct *p,
1608 				  const struct sched_attr *attr)
1609 {
1610 	return -EOPNOTSUPP;
1611 }
__setscheduler_uclamp(struct task_struct * p,const struct sched_attr * attr)1612 static void __setscheduler_uclamp(struct task_struct *p,
1613 				  const struct sched_attr *attr) { }
uclamp_fork(struct task_struct * p)1614 static inline void uclamp_fork(struct task_struct *p) { }
uclamp_post_fork(struct task_struct * p)1615 static inline void uclamp_post_fork(struct task_struct *p) { }
init_uclamp(void)1616 static inline void init_uclamp(void) { }
1617 #endif /* CONFIG_UCLAMP_TASK */
1618 
enqueue_task(struct rq * rq,struct task_struct * p,int flags)1619 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
1620 {
1621 	if (!(flags & ENQUEUE_NOCLOCK))
1622 		update_rq_clock(rq);
1623 
1624 	if (!(flags & ENQUEUE_RESTORE)) {
1625 		sched_info_queued(rq, p);
1626 		psi_enqueue(p, flags & ENQUEUE_WAKEUP);
1627 	}
1628 
1629 	uclamp_rq_inc(rq, p);
1630 	p->sched_class->enqueue_task(rq, p, flags);
1631 }
1632 
dequeue_task(struct rq * rq,struct task_struct * p,int flags)1633 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
1634 {
1635 	if (!(flags & DEQUEUE_NOCLOCK))
1636 		update_rq_clock(rq);
1637 
1638 	if (!(flags & DEQUEUE_SAVE)) {
1639 		sched_info_dequeued(rq, p);
1640 		psi_dequeue(p, flags & DEQUEUE_SLEEP);
1641 	}
1642 
1643 	uclamp_rq_dec(rq, p);
1644 	p->sched_class->dequeue_task(rq, p, flags);
1645 }
1646 
activate_task(struct rq * rq,struct task_struct * p,int flags)1647 void activate_task(struct rq *rq, struct task_struct *p, int flags)
1648 {
1649 	enqueue_task(rq, p, flags);
1650 
1651 	p->on_rq = TASK_ON_RQ_QUEUED;
1652 }
1653 
deactivate_task(struct rq * rq,struct task_struct * p,int flags)1654 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1655 {
1656 	p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
1657 
1658 	dequeue_task(rq, p, flags);
1659 }
1660 
__normal_prio(int policy,int rt_prio,int nice)1661 static inline int __normal_prio(int policy, int rt_prio, int nice)
1662 {
1663 	int prio;
1664 
1665 	if (dl_policy(policy))
1666 		prio = MAX_DL_PRIO - 1;
1667 	else if (rt_policy(policy))
1668 		prio = MAX_RT_PRIO - 1 - rt_prio;
1669 	else
1670 		prio = NICE_TO_PRIO(nice);
1671 
1672 	return prio;
1673 }
1674 
1675 /*
1676  * Calculate the expected normal priority: i.e. priority
1677  * without taking RT-inheritance into account. Might be
1678  * boosted by interactivity modifiers. Changes upon fork,
1679  * setprio syscalls, and whenever the interactivity
1680  * estimator recalculates.
1681  */
normal_prio(struct task_struct * p)1682 static inline int normal_prio(struct task_struct *p)
1683 {
1684 	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
1685 }
1686 
1687 /*
1688  * Calculate the current priority, i.e. the priority
1689  * taken into account by the scheduler. This value might
1690  * be boosted by RT tasks, or might be boosted by
1691  * interactivity modifiers. Will be RT if the task got
1692  * RT-boosted. If not then it returns p->normal_prio.
1693  */
effective_prio(struct task_struct * p)1694 static int effective_prio(struct task_struct *p)
1695 {
1696 	p->normal_prio = normal_prio(p);
1697 	/*
1698 	 * If we are RT tasks or we were boosted to RT priority,
1699 	 * keep the priority unchanged. Otherwise, update priority
1700 	 * to the normal priority:
1701 	 */
1702 	if (!rt_prio(p->prio))
1703 		return p->normal_prio;
1704 	return p->prio;
1705 }
1706 
1707 /**
1708  * task_curr - is this task currently executing on a CPU?
1709  * @p: the task in question.
1710  *
1711  * Return: 1 if the task is currently executing. 0 otherwise.
1712  */
task_curr(const struct task_struct * p)1713 inline int task_curr(const struct task_struct *p)
1714 {
1715 	return cpu_curr(task_cpu(p)) == p;
1716 }
1717 
1718 /*
1719  * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
1720  * use the balance_callback list if you want balancing.
1721  *
1722  * this means any call to check_class_changed() must be followed by a call to
1723  * balance_callback().
1724  */
check_class_changed(struct rq * rq,struct task_struct * p,const struct sched_class * prev_class,int oldprio)1725 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1726 				       const struct sched_class *prev_class,
1727 				       int oldprio)
1728 {
1729 	if (prev_class != p->sched_class) {
1730 		if (prev_class->switched_from)
1731 			prev_class->switched_from(rq, p);
1732 
1733 		p->sched_class->switched_to(rq, p);
1734 	} else if (oldprio != p->prio || dl_task(p))
1735 		p->sched_class->prio_changed(rq, p, oldprio);
1736 }
1737 
check_preempt_curr(struct rq * rq,struct task_struct * p,int flags)1738 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
1739 {
1740 	if (p->sched_class == rq->curr->sched_class)
1741 		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
1742 	else if (p->sched_class > rq->curr->sched_class)
1743 		resched_curr(rq);
1744 
1745 	/*
1746 	 * A queue event has occurred, and we're going to schedule.  In
1747 	 * this case, we can save a useless back to back clock update.
1748 	 */
1749 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
1750 		rq_clock_skip_update(rq);
1751 }
1752 
1753 #ifdef CONFIG_SMP
1754 
1755 /*
1756  * Per-CPU kthreads are allowed to run on !active && online CPUs, see
1757  * __set_cpus_allowed_ptr() and select_fallback_rq().
1758  */
is_cpu_allowed(struct task_struct * p,int cpu)1759 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
1760 {
1761 	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
1762 		return false;
1763 
1764 	if (is_per_cpu_kthread(p))
1765 		return cpu_online(cpu);
1766 
1767 	return cpu_active(cpu);
1768 }
1769 
1770 /*
1771  * This is how migration works:
1772  *
1773  * 1) we invoke migration_cpu_stop() on the target CPU using
1774  *    stop_one_cpu().
1775  * 2) stopper starts to run (implicitly forcing the migrated thread
1776  *    off the CPU)
1777  * 3) it checks whether the migrated task is still in the wrong runqueue.
1778  * 4) if it's in the wrong runqueue then the migration thread removes
1779  *    it and puts it into the right queue.
1780  * 5) stopper completes and stop_one_cpu() returns and the migration
1781  *    is done.
1782  */
1783 
1784 /*
1785  * move_queued_task - move a queued task to new rq.
1786  *
1787  * Returns (locked) new rq. Old rq's lock is released.
1788  */
move_queued_task(struct rq * rq,struct rq_flags * rf,struct task_struct * p,int new_cpu)1789 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
1790 				   struct task_struct *p, int new_cpu)
1791 {
1792 	lockdep_assert_held(&rq->lock);
1793 
1794 	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
1795 #ifdef CONFIG_SCHED_WALT
1796 	double_lock_balance(rq, cpu_rq(new_cpu));
1797 	if (!(rq->clock_update_flags & RQCF_UPDATED))
1798 		update_rq_clock(rq);
1799 #endif
1800 	set_task_cpu(p, new_cpu);
1801 #ifdef CONFIG_SCHED_WALT
1802 	double_rq_unlock(cpu_rq(new_cpu), rq);
1803 #else
1804 	rq_unlock(rq, rf);
1805 #endif
1806 
1807 	rq = cpu_rq(new_cpu);
1808 
1809 	rq_lock(rq, rf);
1810 	BUG_ON(task_cpu(p) != new_cpu);
1811 	activate_task(rq, p, 0);
1812 	check_preempt_curr(rq, p, 0);
1813 
1814 	return rq;
1815 }
1816 
1817 struct migration_arg {
1818 	struct task_struct *task;
1819 	int dest_cpu;
1820 };
1821 
1822 /*
1823  * Move (not current) task off this CPU, onto the destination CPU. We're doing
1824  * this because either it can't run here any more (set_cpus_allowed()
1825  * away from this CPU, or CPU going down), or because we're
1826  * attempting to rebalance this task on exec (sched_exec).
1827  *
1828  * So we race with normal scheduler movements, but that's OK, as long
1829  * as the task is no longer on this CPU.
1830  */
__migrate_task(struct rq * rq,struct rq_flags * rf,struct task_struct * p,int dest_cpu)1831 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
1832 				 struct task_struct *p, int dest_cpu)
1833 {
1834 	/* Affinity changed (again). */
1835 	if (!is_cpu_allowed(p, dest_cpu))
1836 		return rq;
1837 
1838 	update_rq_clock(rq);
1839 	rq = move_queued_task(rq, rf, p, dest_cpu);
1840 
1841 	return rq;
1842 }
1843 
1844 /*
1845  * migration_cpu_stop - this will be executed by a highprio stopper thread
1846  * and performs thread migration by bumping thread off CPU then
1847  * 'pushing' onto another runqueue.
1848  */
migration_cpu_stop(void * data)1849 static int migration_cpu_stop(void *data)
1850 {
1851 	struct migration_arg *arg = data;
1852 	struct task_struct *p = arg->task;
1853 	struct rq *rq = this_rq();
1854 	struct rq_flags rf;
1855 
1856 	/*
1857 	 * The original target CPU might have gone down and we might
1858 	 * be on another CPU but it doesn't matter.
1859 	 */
1860 	local_irq_disable();
1861 	/*
1862 	 * We need to explicitly wake pending tasks before running
1863 	 * __migrate_task() such that we will not miss enforcing cpus_ptr
1864 	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
1865 	 */
1866 	flush_smp_call_function_from_idle();
1867 
1868 	raw_spin_lock(&p->pi_lock);
1869 	rq_lock(rq, &rf);
1870 	/*
1871 	 * If task_rq(p) != rq, it cannot be migrated here, because we're
1872 	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
1873 	 * we're holding p->pi_lock.
1874 	 */
1875 	if (task_rq(p) == rq) {
1876 		if (task_on_rq_queued(p))
1877 			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
1878 		else
1879 			p->wake_cpu = arg->dest_cpu;
1880 	}
1881 	rq_unlock(rq, &rf);
1882 	raw_spin_unlock(&p->pi_lock);
1883 
1884 	local_irq_enable();
1885 	return 0;
1886 }
1887 
1888 /*
1889  * sched_class::set_cpus_allowed must do the below, but is not required to
1890  * actually call this function.
1891  */
set_cpus_allowed_common(struct task_struct * p,const struct cpumask * new_mask)1892 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
1893 {
1894 	cpumask_copy(&p->cpus_mask, new_mask);
1895 	p->nr_cpus_allowed = cpumask_weight(new_mask);
1896 }
1897 
do_set_cpus_allowed(struct task_struct * p,const struct cpumask * new_mask)1898 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1899 {
1900 	struct rq *rq = task_rq(p);
1901 	bool queued, running;
1902 
1903 	lockdep_assert_held(&p->pi_lock);
1904 
1905 	queued = task_on_rq_queued(p);
1906 	running = task_current(rq, p);
1907 
1908 	if (queued) {
1909 		/*
1910 		 * Because __kthread_bind() calls this on blocked tasks without
1911 		 * holding rq->lock.
1912 		 */
1913 		lockdep_assert_held(&rq->lock);
1914 		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
1915 	}
1916 	if (running)
1917 		put_prev_task(rq, p);
1918 
1919 	p->sched_class->set_cpus_allowed(p, new_mask);
1920 
1921 	if (queued)
1922 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
1923 	if (running)
1924 		set_next_task(rq, p);
1925 }
1926 
1927 /*
1928  * Change a given task's CPU affinity. Migrate the thread to a
1929  * proper CPU and schedule it away if the CPU it's executing on
1930  * is removed from the allowed bitmask.
1931  *
1932  * NOTE: the caller must have a valid reference to the task, the
1933  * task must not exit() & deallocate itself prematurely. The
1934  * call is not atomic; no spinlocks may be held.
1935  */
__set_cpus_allowed_ptr(struct task_struct * p,const struct cpumask * new_mask,bool check)1936 static int __set_cpus_allowed_ptr(struct task_struct *p,
1937 				  const struct cpumask *new_mask, bool check)
1938 {
1939 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
1940 	unsigned int dest_cpu;
1941 	struct rq_flags rf;
1942 	struct rq *rq;
1943 	int ret = 0;
1944 #ifdef CONFIG_CPU_ISOLATION_OPT
1945 	cpumask_t allowed_mask;
1946 #endif
1947 
1948 	rq = task_rq_lock(p, &rf);
1949 	update_rq_clock(rq);
1950 
1951 	if (p->flags & PF_KTHREAD) {
1952 		/*
1953 		 * Kernel threads are allowed on online && !active CPUs
1954 		 */
1955 		cpu_valid_mask = cpu_online_mask;
1956 	}
1957 
1958 	/*
1959 	 * Must re-check here, to close a race against __kthread_bind(),
1960 	 * sched_setaffinity() is not guaranteed to observe the flag.
1961 	 */
1962 	if (check && (p->flags & PF_NO_SETAFFINITY)) {
1963 		ret = -EINVAL;
1964 		goto out;
1965 	}
1966 
1967 	if (cpumask_equal(&p->cpus_mask, new_mask))
1968 		goto out;
1969 
1970 #ifdef CONFIG_CPU_ISOLATION_OPT
1971 	cpumask_andnot(&allowed_mask, new_mask, cpu_isolated_mask);
1972 	cpumask_and(&allowed_mask, &allowed_mask, cpu_valid_mask);
1973 
1974 	dest_cpu = cpumask_any(&allowed_mask);
1975 	if (dest_cpu >= nr_cpu_ids) {
1976 		cpumask_and(&allowed_mask, cpu_valid_mask, new_mask);
1977 		dest_cpu = cpumask_any(&allowed_mask);
1978 		if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
1979 			ret = -EINVAL;
1980 			goto out;
1981 		}
1982 	}
1983 #else
1984 	/*
1985 	 * Picking a ~random cpu helps in cases where we are changing affinity
1986 	 * for groups of tasks (ie. cpuset), so that load balancing is not
1987 	 * immediately required to distribute the tasks within their new mask.
1988 	 */
1989 	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
1990 	if (dest_cpu >= nr_cpu_ids) {
1991 		ret = -EINVAL;
1992 		goto out;
1993 	}
1994 #endif
1995 
1996 	do_set_cpus_allowed(p, new_mask);
1997 
1998 	if (p->flags & PF_KTHREAD) {
1999 		/*
2000 		 * For kernel threads that do indeed end up on online &&
2001 		 * !active we want to ensure they are strict per-CPU threads.
2002 		 */
2003 		WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
2004 			!cpumask_intersects(new_mask, cpu_active_mask) &&
2005 			p->nr_cpus_allowed != 1);
2006 	}
2007 
2008 	/* Can the task run on the task's current CPU? If so, we're done */
2009 #ifdef CONFIG_CPU_ISOLATION_OPT
2010 	if (cpumask_test_cpu(task_cpu(p), &allowed_mask))
2011 		goto out;
2012 #else
2013 	if (cpumask_test_cpu(task_cpu(p), new_mask))
2014 		goto out;
2015 #endif
2016 
2017 	if (task_running(rq, p) || p->state == TASK_WAKING) {
2018 		struct migration_arg arg = { p, dest_cpu };
2019 		/* Need help from migration thread: drop lock and wait. */
2020 		task_rq_unlock(rq, p, &rf);
2021 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
2022 		return 0;
2023 	} else if (task_on_rq_queued(p)) {
2024 		/*
2025 		 * OK, since we're going to drop the lock immediately
2026 		 * afterwards anyway.
2027 		 */
2028 		rq = move_queued_task(rq, &rf, p, dest_cpu);
2029 	}
2030 out:
2031 	task_rq_unlock(rq, p, &rf);
2032 
2033 	return ret;
2034 }
2035 
set_cpus_allowed_ptr(struct task_struct * p,const struct cpumask * new_mask)2036 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
2037 {
2038 	return __set_cpus_allowed_ptr(p, new_mask, false);
2039 }
2040 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
2041 
set_task_cpu(struct task_struct * p,unsigned int new_cpu)2042 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2043 {
2044 #ifdef CONFIG_SCHED_DEBUG
2045 	/*
2046 	 * We should never call set_task_cpu() on a blocked task,
2047 	 * ttwu() will sort out the placement.
2048 	 */
2049 	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2050 			!p->on_rq);
2051 
2052 	/*
2053 	 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
2054 	 * because schedstat_wait_{start,end} rebase migrating task's wait_start
2055 	 * time relying on p->on_rq.
2056 	 */
2057 	WARN_ON_ONCE(p->state == TASK_RUNNING &&
2058 		     p->sched_class == &fair_sched_class &&
2059 		     (p->on_rq && !task_on_rq_migrating(p)));
2060 
2061 #ifdef CONFIG_LOCKDEP
2062 	/*
2063 	 * The caller should hold either p->pi_lock or rq->lock, when changing
2064 	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
2065 	 *
2066 	 * sched_move_task() holds both and thus holding either pins the cgroup,
2067 	 * see task_group().
2068 	 *
2069 	 * Furthermore, all task_rq users should acquire both locks, see
2070 	 * task_rq_lock().
2071 	 */
2072 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
2073 				      lockdep_is_held(&task_rq(p)->lock)));
2074 #endif
2075 	/*
2076 	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
2077 	 */
2078 	WARN_ON_ONCE(!cpu_online(new_cpu));
2079 #endif
2080 
2081 	trace_sched_migrate_task(p, new_cpu);
2082 
2083 	if (task_cpu(p) != new_cpu) {
2084 		if (p->sched_class->migrate_task_rq)
2085 			p->sched_class->migrate_task_rq(p, new_cpu);
2086 		p->se.nr_migrations++;
2087 		rseq_migrate(p);
2088 		perf_event_task_migrate(p);
2089 		fixup_busy_time(p, new_cpu);
2090 	}
2091 
2092 	__set_task_cpu(p, new_cpu);
2093 }
2094 
2095 #ifdef CONFIG_NUMA_BALANCING
__migrate_swap_task(struct task_struct * p,int cpu)2096 static void __migrate_swap_task(struct task_struct *p, int cpu)
2097 {
2098 	if (task_on_rq_queued(p)) {
2099 		struct rq *src_rq, *dst_rq;
2100 		struct rq_flags srf, drf;
2101 
2102 		src_rq = task_rq(p);
2103 		dst_rq = cpu_rq(cpu);
2104 
2105 		rq_pin_lock(src_rq, &srf);
2106 		rq_pin_lock(dst_rq, &drf);
2107 
2108 		deactivate_task(src_rq, p, 0);
2109 		set_task_cpu(p, cpu);
2110 		activate_task(dst_rq, p, 0);
2111 		check_preempt_curr(dst_rq, p, 0);
2112 
2113 		rq_unpin_lock(dst_rq, &drf);
2114 		rq_unpin_lock(src_rq, &srf);
2115 
2116 	} else {
2117 		/*
2118 		 * Task isn't running anymore; make it appear like we migrated
2119 		 * it before it went to sleep. This means on wakeup we make the
2120 		 * previous CPU our target instead of where it really is.
2121 		 */
2122 		p->wake_cpu = cpu;
2123 	}
2124 }
2125 
2126 struct migration_swap_arg {
2127 	struct task_struct *src_task, *dst_task;
2128 	int src_cpu, dst_cpu;
2129 };
2130 
migrate_swap_stop(void * data)2131 static int migrate_swap_stop(void *data)
2132 {
2133 	struct migration_swap_arg *arg = data;
2134 	struct rq *src_rq, *dst_rq;
2135 	int ret = -EAGAIN;
2136 
2137 	if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
2138 		return -EAGAIN;
2139 
2140 	src_rq = cpu_rq(arg->src_cpu);
2141 	dst_rq = cpu_rq(arg->dst_cpu);
2142 
2143 	double_raw_lock(&arg->src_task->pi_lock,
2144 			&arg->dst_task->pi_lock);
2145 	double_rq_lock(src_rq, dst_rq);
2146 
2147 	if (task_cpu(arg->dst_task) != arg->dst_cpu)
2148 		goto unlock;
2149 
2150 	if (task_cpu(arg->src_task) != arg->src_cpu)
2151 		goto unlock;
2152 
2153 	if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
2154 		goto unlock;
2155 
2156 	if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
2157 		goto unlock;
2158 
2159 	__migrate_swap_task(arg->src_task, arg->dst_cpu);
2160 	__migrate_swap_task(arg->dst_task, arg->src_cpu);
2161 
2162 	ret = 0;
2163 
2164 unlock:
2165 	double_rq_unlock(src_rq, dst_rq);
2166 	raw_spin_unlock(&arg->dst_task->pi_lock);
2167 	raw_spin_unlock(&arg->src_task->pi_lock);
2168 
2169 	return ret;
2170 }
2171 
2172 /*
2173  * Cross migrate two tasks
2174  */
migrate_swap(struct task_struct * cur,struct task_struct * p,int target_cpu,int curr_cpu)2175 int migrate_swap(struct task_struct *cur, struct task_struct *p,
2176 		int target_cpu, int curr_cpu)
2177 {
2178 	struct migration_swap_arg arg;
2179 	int ret = -EINVAL;
2180 
2181 	arg = (struct migration_swap_arg){
2182 		.src_task = cur,
2183 		.src_cpu = curr_cpu,
2184 		.dst_task = p,
2185 		.dst_cpu = target_cpu,
2186 	};
2187 
2188 	if (arg.src_cpu == arg.dst_cpu)
2189 		goto out;
2190 
2191 	/*
2192 	 * These three tests are all lockless; this is OK since all of them
2193 	 * will be re-checked with proper locks held further down the line.
2194 	 */
2195 	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
2196 		goto out;
2197 
2198 	if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
2199 		goto out;
2200 
2201 	if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
2202 		goto out;
2203 
2204 	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
2205 	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
2206 
2207 out:
2208 	return ret;
2209 }
2210 #endif /* CONFIG_NUMA_BALANCING */
2211 
2212 /*
2213  * wait_task_inactive - wait for a thread to unschedule.
2214  *
2215  * If @match_state is nonzero, it's the @p->state value just checked and
2216  * not expected to change.  If it changes, i.e. @p might have woken up,
2217  * then return zero.  When we succeed in waiting for @p to be off its CPU,
2218  * we return a positive number (its total switch count).  If a second call
2219  * a short while later returns the same number, the caller can be sure that
2220  * @p has remained unscheduled the whole time.
2221  *
2222  * The caller must ensure that the task *will* unschedule sometime soon,
2223  * else this function might spin for a *long* time. This function can't
2224  * be called with interrupts off, or it may introduce deadlock with
2225  * smp_call_function() if an IPI is sent by the same process we are
2226  * waiting to become inactive.
2227  */
wait_task_inactive(struct task_struct * p,long match_state)2228 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
2229 {
2230 	int running, queued;
2231 	struct rq_flags rf;
2232 	unsigned long ncsw;
2233 	struct rq *rq;
2234 
2235 	for (;;) {
2236 		/*
2237 		 * We do the initial early heuristics without holding
2238 		 * any task-queue locks at all. We'll only try to get
2239 		 * the runqueue lock when things look like they will
2240 		 * work out!
2241 		 */
2242 		rq = task_rq(p);
2243 
2244 		/*
2245 		 * If the task is actively running on another CPU
2246 		 * still, just relax and busy-wait without holding
2247 		 * any locks.
2248 		 *
2249 		 * NOTE! Since we don't hold any locks, it's not
2250 		 * even sure that "rq" stays as the right runqueue!
2251 		 * But we don't care, since "task_running()" will
2252 		 * return false if the runqueue has changed and p
2253 		 * is actually now running somewhere else!
2254 		 */
2255 		while (task_running(rq, p)) {
2256 			if (match_state && unlikely(p->state != match_state))
2257 				return 0;
2258 			cpu_relax();
2259 		}
2260 
2261 		/*
2262 		 * Ok, time to look more closely! We need the rq
2263 		 * lock now, to be *sure*. If we're wrong, we'll
2264 		 * just go back and repeat.
2265 		 */
2266 		rq = task_rq_lock(p, &rf);
2267 		trace_sched_wait_task(p);
2268 		running = task_running(rq, p);
2269 		queued = task_on_rq_queued(p);
2270 		ncsw = 0;
2271 		if (!match_state || p->state == match_state)
2272 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2273 		task_rq_unlock(rq, p, &rf);
2274 
2275 		/*
2276 		 * If it changed from the expected state, bail out now.
2277 		 */
2278 		if (unlikely(!ncsw))
2279 			break;
2280 
2281 		/*
2282 		 * Was it really running after all now that we
2283 		 * checked with the proper locks actually held?
2284 		 *
2285 		 * Oops. Go back and try again..
2286 		 */
2287 		if (unlikely(running)) {
2288 			cpu_relax();
2289 			continue;
2290 		}
2291 
2292 		/*
2293 		 * It's not enough that it's not actively running,
2294 		 * it must be off the runqueue _entirely_, and not
2295 		 * preempted!
2296 		 *
2297 		 * So if it was still runnable (but just not actively
2298 		 * running right now), it's preempted, and we should
2299 		 * yield - it could be a while.
2300 		 */
2301 		if (unlikely(queued)) {
2302 			ktime_t to = NSEC_PER_SEC / HZ;
2303 
2304 			set_current_state(TASK_UNINTERRUPTIBLE);
2305 			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
2306 			continue;
2307 		}
2308 
2309 		/*
2310 		 * Ahh, all good. It wasn't running, and it wasn't
2311 		 * runnable, which means that it will never become
2312 		 * running in the future either. We're all done!
2313 		 */
2314 		break;
2315 	}
2316 
2317 	return ncsw;
2318 }
2319 
2320 /***
2321  * kick_process - kick a running thread to enter/exit the kernel
2322  * @p: the to-be-kicked thread
2323  *
2324  * Cause a process which is running on another CPU to enter
2325  * kernel-mode, without any delay. (to get signals handled.)
2326  *
2327  * NOTE: this function doesn't have to take the runqueue lock,
2328  * because all it wants to ensure is that the remote task enters
2329  * the kernel. If the IPI races and the task has been migrated
2330  * to another CPU then no harm is done and the purpose has been
2331  * achieved as well.
2332  */
kick_process(struct task_struct * p)2333 void kick_process(struct task_struct *p)
2334 {
2335 	int cpu;
2336 
2337 	preempt_disable();
2338 	cpu = task_cpu(p);
2339 	if ((cpu != smp_processor_id()) && task_curr(p))
2340 		smp_send_reschedule(cpu);
2341 	preempt_enable();
2342 }
2343 EXPORT_SYMBOL_GPL(kick_process);
2344 
2345 /*
2346  * ->cpus_ptr is protected by both rq->lock and p->pi_lock
2347  *
2348  * A few notes on cpu_active vs cpu_online:
2349  *
2350  *  - cpu_active must be a subset of cpu_online
2351  *
2352  *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
2353  *    see __set_cpus_allowed_ptr(). At this point the newly online
2354  *    CPU isn't yet part of the sched domains, and balancing will not
2355  *    see it.
2356  *
2357  *  - on CPU-down we clear cpu_active() to mask the sched domains and
2358  *    avoid the load balancer to place new tasks on the to be removed
2359  *    CPU. Existing tasks will remain running there and will be taken
2360  *    off.
2361  *
2362  * This means that fallback selection must not select !active CPUs.
2363  * And can assume that any active CPU must be online. Conversely
2364  * select_task_rq() below may allow selection of !active CPUs in order
2365  * to satisfy the above rules.
2366  */
2367 #ifdef CONFIG_CPU_ISOLATION_OPT
select_fallback_rq(int cpu,struct task_struct * p,bool allow_iso)2368 static int select_fallback_rq(int cpu, struct task_struct *p, bool allow_iso)
2369 #else
2370 static int select_fallback_rq(int cpu, struct task_struct *p)
2371 #endif
2372 {
2373 	int nid = cpu_to_node(cpu);
2374 	const struct cpumask *nodemask = NULL;
2375 	enum { cpuset, possible, fail, bug } state = cpuset;
2376 	int dest_cpu;
2377 #ifdef CONFIG_CPU_ISOLATION_OPT
2378 	int isolated_candidate = -1;
2379 #endif
2380 
2381 	/*
2382 	 * If the node that the CPU is on has been offlined, cpu_to_node()
2383 	 * will return -1. There is no CPU on the node, and we should
2384 	 * select the CPU on the other node.
2385 	 */
2386 	if (nid != -1) {
2387 		nodemask = cpumask_of_node(nid);
2388 
2389 		/* Look for allowed, online CPU in same node. */
2390 		for_each_cpu(dest_cpu, nodemask) {
2391 			if (!cpu_active(dest_cpu))
2392 				continue;
2393 			if (cpu_isolated(dest_cpu))
2394 				continue;
2395 			if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
2396 				return dest_cpu;
2397 		}
2398 	}
2399 
2400 	for (;;) {
2401 		/* Any allowed, online CPU? */
2402 		for_each_cpu(dest_cpu, p->cpus_ptr) {
2403 			if (!is_cpu_allowed(p, dest_cpu))
2404 				continue;
2405 #ifdef CONFIG_CPU_ISOLATION_OPT
2406 			if (cpu_isolated(dest_cpu)) {
2407 				if (allow_iso)
2408 					isolated_candidate = dest_cpu;
2409 				continue;
2410 			}
2411 			goto out;
2412 		}
2413 
2414 		if (isolated_candidate != -1) {
2415 			dest_cpu = isolated_candidate;
2416 #endif
2417 			goto out;
2418 		}
2419 
2420 		/* No more Mr. Nice Guy. */
2421 		switch (state) {
2422 		case cpuset:
2423 			if (IS_ENABLED(CONFIG_CPUSETS)) {
2424 				cpuset_cpus_allowed_fallback(p);
2425 				state = possible;
2426 				break;
2427 			}
2428 			fallthrough;
2429 		case possible:
2430 			do_set_cpus_allowed(p, cpu_possible_mask);
2431 			state = fail;
2432 			break;
2433 
2434 		case fail:
2435 #ifdef CONFIG_CPU_ISOLATION_OPT
2436 			allow_iso = true;
2437 			state = bug;
2438 			break;
2439 #else
2440 			/* fall through; */
2441 #endif
2442 
2443 		case bug:
2444 			BUG();
2445 			break;
2446 		}
2447 	}
2448 
2449 out:
2450 	if (state != cpuset) {
2451 		/*
2452 		 * Don't tell them about moving exiting tasks or
2453 		 * kernel threads (both mm NULL), since they never
2454 		 * leave kernel.
2455 		 */
2456 		if (p->mm && printk_ratelimit()) {
2457 			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
2458 					task_pid_nr(p), p->comm, cpu);
2459 		}
2460 	}
2461 
2462 	return dest_cpu;
2463 }
2464 
2465 /*
2466  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
2467  */
2468 static inline
select_task_rq(struct task_struct * p,int cpu,int sd_flags,int wake_flags)2469 int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
2470 {
2471 #ifdef CONFIG_CPU_ISOLATION_OPT
2472 	bool allow_isolated = (p->flags & PF_KTHREAD);
2473 #endif
2474 
2475 	lockdep_assert_held(&p->pi_lock);
2476 
2477 	if (p->nr_cpus_allowed > 1)
2478 		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
2479 	else
2480 		cpu = cpumask_any(p->cpus_ptr);
2481 
2482 	/*
2483 	 * In order not to call set_task_cpu() on a blocking task we need
2484 	 * to rely on ttwu() to place the task on a valid ->cpus_ptr
2485 	 * CPU.
2486 	 *
2487 	 * Since this is common to all placement strategies, this lives here.
2488 	 *
2489 	 * [ this allows ->select_task() to simply return task_cpu(p) and
2490 	 *   not worry about this generic constraint ]
2491 	 */
2492 #ifdef CONFIG_CPU_ISOLATION_OPT
2493 	if (unlikely(!is_cpu_allowed(p, cpu)) ||
2494 			(cpu_isolated(cpu) && !allow_isolated))
2495 		cpu = select_fallback_rq(task_cpu(p), p, allow_isolated);
2496 #else
2497 	if (unlikely(!is_cpu_allowed(p, cpu)))
2498 		cpu = select_fallback_rq(task_cpu(p), p);
2499 #endif
2500 
2501 	return cpu;
2502 }
2503 
sched_set_stop_task(int cpu,struct task_struct * stop)2504 void sched_set_stop_task(int cpu, struct task_struct *stop)
2505 {
2506 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
2507 	struct task_struct *old_stop = cpu_rq(cpu)->stop;
2508 
2509 	if (stop) {
2510 		/*
2511 		 * Make it appear like a SCHED_FIFO task, its something
2512 		 * userspace knows about and won't get confused about.
2513 		 *
2514 		 * Also, it will make PI more or less work without too
2515 		 * much confusion -- but then, stop work should not
2516 		 * rely on PI working anyway.
2517 		 */
2518 		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
2519 
2520 		stop->sched_class = &stop_sched_class;
2521 	}
2522 
2523 	cpu_rq(cpu)->stop = stop;
2524 
2525 	if (old_stop) {
2526 		/*
2527 		 * Reset it back to a normal scheduling class so that
2528 		 * it can die in pieces.
2529 		 */
2530 		old_stop->sched_class = &rt_sched_class;
2531 	}
2532 }
2533 
2534 #else
2535 
__set_cpus_allowed_ptr(struct task_struct * p,const struct cpumask * new_mask,bool check)2536 static inline int __set_cpus_allowed_ptr(struct task_struct *p,
2537 					 const struct cpumask *new_mask, bool check)
2538 {
2539 	return set_cpus_allowed_ptr(p, new_mask);
2540 }
2541 
2542 #endif /* CONFIG_SMP */
2543 
2544 static void
ttwu_stat(struct task_struct * p,int cpu,int wake_flags)2545 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
2546 {
2547 	struct rq *rq;
2548 
2549 	if (!schedstat_enabled())
2550 		return;
2551 
2552 	rq = this_rq();
2553 
2554 #ifdef CONFIG_SMP
2555 	if (cpu == rq->cpu) {
2556 		__schedstat_inc(rq->ttwu_local);
2557 		__schedstat_inc(p->se.statistics.nr_wakeups_local);
2558 	} else {
2559 		struct sched_domain *sd;
2560 
2561 		__schedstat_inc(p->se.statistics.nr_wakeups_remote);
2562 		rcu_read_lock();
2563 		for_each_domain(rq->cpu, sd) {
2564 			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
2565 				__schedstat_inc(sd->ttwu_wake_remote);
2566 				break;
2567 			}
2568 		}
2569 		rcu_read_unlock();
2570 	}
2571 
2572 	if (wake_flags & WF_MIGRATED)
2573 		__schedstat_inc(p->se.statistics.nr_wakeups_migrate);
2574 #endif /* CONFIG_SMP */
2575 
2576 	__schedstat_inc(rq->ttwu_count);
2577 	__schedstat_inc(p->se.statistics.nr_wakeups);
2578 
2579 	if (wake_flags & WF_SYNC)
2580 		__schedstat_inc(p->se.statistics.nr_wakeups_sync);
2581 }
2582 
2583 /*
2584  * Mark the task runnable and perform wakeup-preemption.
2585  */
ttwu_do_wakeup(struct rq * rq,struct task_struct * p,int wake_flags,struct rq_flags * rf)2586 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
2587 			   struct rq_flags *rf)
2588 {
2589 	check_preempt_curr(rq, p, wake_flags);
2590 	p->state = TASK_RUNNING;
2591 	trace_sched_wakeup(p);
2592 
2593 #ifdef CONFIG_SMP
2594 	if (p->sched_class->task_woken) {
2595 		/*
2596 		 * Our task @p is fully woken up and running; so its safe to
2597 		 * drop the rq->lock, hereafter rq is only used for statistics.
2598 		 */
2599 		rq_unpin_lock(rq, rf);
2600 		p->sched_class->task_woken(rq, p);
2601 		rq_repin_lock(rq, rf);
2602 	}
2603 
2604 	if (rq->idle_stamp) {
2605 		u64 delta = rq_clock(rq) - rq->idle_stamp;
2606 		u64 max = 2*rq->max_idle_balance_cost;
2607 
2608 		update_avg(&rq->avg_idle, delta);
2609 
2610 		if (rq->avg_idle > max)
2611 			rq->avg_idle = max;
2612 
2613 		rq->idle_stamp = 0;
2614 	}
2615 #endif
2616 }
2617 
2618 static void
ttwu_do_activate(struct rq * rq,struct task_struct * p,int wake_flags,struct rq_flags * rf)2619 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
2620 		 struct rq_flags *rf)
2621 {
2622 	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
2623 
2624 	lockdep_assert_held(&rq->lock);
2625 
2626 	if (p->sched_contributes_to_load)
2627 		rq->nr_uninterruptible--;
2628 
2629 #ifdef CONFIG_SMP
2630 	if (wake_flags & WF_MIGRATED)
2631 		en_flags |= ENQUEUE_MIGRATED;
2632 	else
2633 #endif
2634 	if (p->in_iowait) {
2635 		delayacct_blkio_end(p);
2636 		atomic_dec(&task_rq(p)->nr_iowait);
2637 	}
2638 
2639 	activate_task(rq, p, en_flags);
2640 	ttwu_do_wakeup(rq, p, wake_flags, rf);
2641 }
2642 
2643 /*
2644  * Consider @p being inside a wait loop:
2645  *
2646  *   for (;;) {
2647  *      set_current_state(TASK_UNINTERRUPTIBLE);
2648  *
2649  *      if (CONDITION)
2650  *         break;
2651  *
2652  *      schedule();
2653  *   }
2654  *   __set_current_state(TASK_RUNNING);
2655  *
2656  * between set_current_state() and schedule(). In this case @p is still
2657  * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
2658  * an atomic manner.
2659  *
2660  * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
2661  * then schedule() must still happen and p->state can be changed to
2662  * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
2663  * need to do a full wakeup with enqueue.
2664  *
2665  * Returns: %true when the wakeup is done,
2666  *          %false otherwise.
2667  */
ttwu_runnable(struct task_struct * p,int wake_flags)2668 static int ttwu_runnable(struct task_struct *p, int wake_flags)
2669 {
2670 	struct rq_flags rf;
2671 	struct rq *rq;
2672 	int ret = 0;
2673 
2674 	rq = __task_rq_lock(p, &rf);
2675 	if (task_on_rq_queued(p)) {
2676 		/* check_preempt_curr() may use rq clock */
2677 		update_rq_clock(rq);
2678 		ttwu_do_wakeup(rq, p, wake_flags, &rf);
2679 		ret = 1;
2680 	}
2681 	__task_rq_unlock(rq, &rf);
2682 
2683 	return ret;
2684 }
2685 
2686 #ifdef CONFIG_SMP
sched_ttwu_pending(void * arg)2687 void sched_ttwu_pending(void *arg)
2688 {
2689 	struct llist_node *llist = arg;
2690 	struct rq *rq = this_rq();
2691 	struct task_struct *p, *t;
2692 	struct rq_flags rf;
2693 
2694 	if (!llist)
2695 		return;
2696 
2697 	/*
2698 	 * rq::ttwu_pending racy indication of out-standing wakeups.
2699 	 * Races such that false-negatives are possible, since they
2700 	 * are shorter lived that false-positives would be.
2701 	 */
2702 	WRITE_ONCE(rq->ttwu_pending, 0);
2703 
2704 	rq_lock_irqsave(rq, &rf);
2705 	update_rq_clock(rq);
2706 
2707 	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
2708 		if (WARN_ON_ONCE(p->on_cpu))
2709 			smp_cond_load_acquire(&p->on_cpu, !VAL);
2710 
2711 		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
2712 			set_task_cpu(p, cpu_of(rq));
2713 
2714 		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
2715 	}
2716 
2717 	rq_unlock_irqrestore(rq, &rf);
2718 }
2719 
send_call_function_single_ipi(int cpu)2720 void send_call_function_single_ipi(int cpu)
2721 {
2722 	struct rq *rq = cpu_rq(cpu);
2723 
2724 	if (!set_nr_if_polling(rq->idle))
2725 		arch_send_call_function_single_ipi(cpu);
2726 	else
2727 		trace_sched_wake_idle_without_ipi(cpu);
2728 }
2729 
2730 /*
2731  * Queue a task on the target CPUs wake_list and wake the CPU via IPI if
2732  * necessary. The wakee CPU on receipt of the IPI will queue the task
2733  * via sched_ttwu_wakeup() for activation so the wakee incurs the cost
2734  * of the wakeup instead of the waker.
2735  */
__ttwu_queue_wakelist(struct task_struct * p,int cpu,int wake_flags)2736 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
2737 {
2738 	struct rq *rq = cpu_rq(cpu);
2739 
2740 	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
2741 
2742 	WRITE_ONCE(rq->ttwu_pending, 1);
2743 	__smp_call_single_queue(cpu, &p->wake_entry.llist);
2744 }
2745 
wake_up_if_idle(int cpu)2746 void wake_up_if_idle(int cpu)
2747 {
2748 	struct rq *rq = cpu_rq(cpu);
2749 	struct rq_flags rf;
2750 
2751 	rcu_read_lock();
2752 
2753 	if (!is_idle_task(rcu_dereference(rq->curr)))
2754 		goto out;
2755 
2756 	if (set_nr_if_polling(rq->idle)) {
2757 		trace_sched_wake_idle_without_ipi(cpu);
2758 	} else {
2759 		rq_lock_irqsave(rq, &rf);
2760 		if (is_idle_task(rq->curr))
2761 			smp_send_reschedule(cpu);
2762 		/* Else CPU is not idle, do nothing here: */
2763 		rq_unlock_irqrestore(rq, &rf);
2764 	}
2765 
2766 out:
2767 	rcu_read_unlock();
2768 }
2769 
cpus_share_cache(int this_cpu,int that_cpu)2770 bool cpus_share_cache(int this_cpu, int that_cpu)
2771 {
2772 	if (this_cpu == that_cpu)
2773 		return true;
2774 
2775 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
2776 }
2777 
ttwu_queue_cond(int cpu,int wake_flags)2778 static inline bool ttwu_queue_cond(int cpu, int wake_flags)
2779 {
2780 	/*
2781 	 * If the CPU does not share cache, then queue the task on the
2782 	 * remote rqs wakelist to avoid accessing remote data.
2783 	 */
2784 	if (!cpus_share_cache(smp_processor_id(), cpu))
2785 		return true;
2786 
2787 	/*
2788 	 * If the task is descheduling and the only running task on the
2789 	 * CPU then use the wakelist to offload the task activation to
2790 	 * the soon-to-be-idle CPU as the current CPU is likely busy.
2791 	 * nr_running is checked to avoid unnecessary task stacking.
2792 	 */
2793 	if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1)
2794 		return true;
2795 
2796 	return false;
2797 }
2798 
ttwu_queue_wakelist(struct task_struct * p,int cpu,int wake_flags)2799 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
2800 {
2801 	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) {
2802 		if (WARN_ON_ONCE(cpu == smp_processor_id()))
2803 			return false;
2804 
2805 		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
2806 		__ttwu_queue_wakelist(p, cpu, wake_flags);
2807 		return true;
2808 	}
2809 
2810 	return false;
2811 }
2812 
2813 #else /* !CONFIG_SMP */
2814 
ttwu_queue_wakelist(struct task_struct * p,int cpu,int wake_flags)2815 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
2816 {
2817 	return false;
2818 }
2819 
2820 #endif /* CONFIG_SMP */
2821 
ttwu_queue(struct task_struct * p,int cpu,int wake_flags)2822 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
2823 {
2824 	struct rq *rq = cpu_rq(cpu);
2825 	struct rq_flags rf;
2826 
2827 	if (ttwu_queue_wakelist(p, cpu, wake_flags))
2828 		return;
2829 
2830 	rq_lock(rq, &rf);
2831 	update_rq_clock(rq);
2832 	ttwu_do_activate(rq, p, wake_flags, &rf);
2833 	rq_unlock(rq, &rf);
2834 }
2835 
2836 /*
2837  * Notes on Program-Order guarantees on SMP systems.
2838  *
2839  *  MIGRATION
2840  *
2841  * The basic program-order guarantee on SMP systems is that when a task [t]
2842  * migrates, all its activity on its old CPU [c0] happens-before any subsequent
2843  * execution on its new CPU [c1].
2844  *
2845  * For migration (of runnable tasks) this is provided by the following means:
2846  *
2847  *  A) UNLOCK of the rq(c0)->lock scheduling out task t
2848  *  B) migration for t is required to synchronize *both* rq(c0)->lock and
2849  *     rq(c1)->lock (if not at the same time, then in that order).
2850  *  C) LOCK of the rq(c1)->lock scheduling in task
2851  *
2852  * Release/acquire chaining guarantees that B happens after A and C after B.
2853  * Note: the CPU doing B need not be c0 or c1
2854  *
2855  * Example:
2856  *
2857  *   CPU0            CPU1            CPU2
2858  *
2859  *   LOCK rq(0)->lock
2860  *   sched-out X
2861  *   sched-in Y
2862  *   UNLOCK rq(0)->lock
2863  *
2864  *                                   LOCK rq(0)->lock // orders against CPU0
2865  *                                   dequeue X
2866  *                                   UNLOCK rq(0)->lock
2867  *
2868  *                                   LOCK rq(1)->lock
2869  *                                   enqueue X
2870  *                                   UNLOCK rq(1)->lock
2871  *
2872  *                   LOCK rq(1)->lock // orders against CPU2
2873  *                   sched-out Z
2874  *                   sched-in X
2875  *                   UNLOCK rq(1)->lock
2876  *
2877  *
2878  *  BLOCKING -- aka. SLEEP + WAKEUP
2879  *
2880  * For blocking we (obviously) need to provide the same guarantee as for
2881  * migration. However the means are completely different as there is no lock
2882  * chain to provide order. Instead we do:
2883  *
2884  *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
2885  *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
2886  *
2887  * Example:
2888  *
2889  *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
2890  *
2891  *   LOCK rq(0)->lock LOCK X->pi_lock
2892  *   dequeue X
2893  *   sched-out X
2894  *   smp_store_release(X->on_cpu, 0);
2895  *
2896  *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
2897  *                    X->state = WAKING
2898  *                    set_task_cpu(X,2)
2899  *
2900  *                    LOCK rq(2)->lock
2901  *                    enqueue X
2902  *                    X->state = RUNNING
2903  *                    UNLOCK rq(2)->lock
2904  *
2905  *                                          LOCK rq(2)->lock // orders against CPU1
2906  *                                          sched-out Z
2907  *                                          sched-in X
2908  *                                          UNLOCK rq(2)->lock
2909  *
2910  *                    UNLOCK X->pi_lock
2911  *   UNLOCK rq(0)->lock
2912  *
2913  *
2914  * However, for wakeups there is a second guarantee we must provide, namely we
2915  * must ensure that CONDITION=1 done by the caller can not be reordered with
2916  * accesses to the task state; see try_to_wake_up() and set_current_state().
2917  */
2918 
2919 #ifdef CONFIG_SMP
2920 #ifdef CONFIG_SCHED_WALT
2921 /* utility function to update walt signals at wakeup */
walt_try_to_wake_up(struct task_struct * p)2922 static inline void walt_try_to_wake_up(struct task_struct *p)
2923 {
2924 	struct rq *rq = cpu_rq(task_cpu(p));
2925 	struct rq_flags rf;
2926 	u64 wallclock;
2927 
2928 	rq_lock_irqsave(rq, &rf);
2929 	wallclock = sched_ktime_clock();
2930 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
2931 	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
2932 	rq_unlock_irqrestore(rq, &rf);
2933 }
2934 #else
2935 #define walt_try_to_wake_up(a) {}
2936 #endif
2937 #endif
2938 
2939 /**
2940  * try_to_wake_up - wake up a thread
2941  * @p: the thread to be awakened
2942  * @state: the mask of task states that can be woken
2943  * @wake_flags: wake modifier flags (WF_*)
2944  *
2945  * Conceptually does:
2946  *
2947  *   If (@state & @p->state) @p->state = TASK_RUNNING.
2948  *
2949  * If the task was not queued/runnable, also place it back on a runqueue.
2950  *
2951  * This function is atomic against schedule() which would dequeue the task.
2952  *
2953  * It issues a full memory barrier before accessing @p->state, see the comment
2954  * with set_current_state().
2955  *
2956  * Uses p->pi_lock to serialize against concurrent wake-ups.
2957  *
2958  * Relies on p->pi_lock stabilizing:
2959  *  - p->sched_class
2960  *  - p->cpus_ptr
2961  *  - p->sched_task_group
2962  * in order to do migration, see its use of select_task_rq()/set_task_cpu().
2963  *
2964  * Tries really hard to only take one task_rq(p)->lock for performance.
2965  * Takes rq->lock in:
2966  *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
2967  *  - ttwu_queue()       -- new rq, for enqueue of the task;
2968  *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
2969  *
2970  * As a consequence we race really badly with just about everything. See the
2971  * many memory barriers and their comments for details.
2972  *
2973  * Return: %true if @p->state changes (an actual wakeup was done),
2974  *	   %false otherwise.
2975  */
2976 static int
try_to_wake_up(struct task_struct * p,unsigned int state,int wake_flags)2977 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
2978 {
2979 	unsigned long flags;
2980 	int cpu, success = 0;
2981 
2982 	preempt_disable();
2983 	if (p == current) {
2984 		/*
2985 		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
2986 		 * == smp_processor_id()'. Together this means we can special
2987 		 * case the whole 'p->on_rq && ttwu_runnable()' case below
2988 		 * without taking any locks.
2989 		 *
2990 		 * In particular:
2991 		 *  - we rely on Program-Order guarantees for all the ordering,
2992 		 *  - we're serialized against set_special_state() by virtue of
2993 		 *    it disabling IRQs (this allows not taking ->pi_lock).
2994 		 */
2995 		if (!(p->state & state))
2996 			goto out;
2997 
2998 		success = 1;
2999 		trace_sched_waking(p);
3000 		p->state = TASK_RUNNING;
3001 		trace_sched_wakeup(p);
3002 		goto out;
3003 	}
3004 
3005 	/*
3006 	 * If we are going to wake up a thread waiting for CONDITION we
3007 	 * need to ensure that CONDITION=1 done by the caller can not be
3008 	 * reordered with p->state check below. This pairs with smp_store_mb()
3009 	 * in set_current_state() that the waiting thread does.
3010 	 */
3011 	raw_spin_lock_irqsave(&p->pi_lock, flags);
3012 	smp_mb__after_spinlock();
3013 	if (!(p->state & state))
3014 		goto unlock;
3015 
3016 	trace_sched_waking(p);
3017 
3018 	/* We're going to change ->state: */
3019 	success = 1;
3020 
3021 	/*
3022 	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
3023 	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
3024 	 * in smp_cond_load_acquire() below.
3025 	 *
3026 	 * sched_ttwu_pending()			try_to_wake_up()
3027 	 *   STORE p->on_rq = 1			  LOAD p->state
3028 	 *   UNLOCK rq->lock
3029 	 *
3030 	 * __schedule() (switch to task 'p')
3031 	 *   LOCK rq->lock			  smp_rmb();
3032 	 *   smp_mb__after_spinlock();
3033 	 *   UNLOCK rq->lock
3034 	 *
3035 	 * [task p]
3036 	 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
3037 	 *
3038 	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
3039 	 * __schedule().  See the comment for smp_mb__after_spinlock().
3040 	 *
3041 	 * A similar smb_rmb() lives in try_invoke_on_locked_down_task().
3042 	 */
3043 	smp_rmb();
3044 	if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
3045 		goto unlock;
3046 
3047 #ifdef CONFIG_SMP
3048 	/*
3049 	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
3050 	 * possible to, falsely, observe p->on_cpu == 0.
3051 	 *
3052 	 * One must be running (->on_cpu == 1) in order to remove oneself
3053 	 * from the runqueue.
3054 	 *
3055 	 * __schedule() (switch to task 'p')	try_to_wake_up()
3056 	 *   STORE p->on_cpu = 1		  LOAD p->on_rq
3057 	 *   UNLOCK rq->lock
3058 	 *
3059 	 * __schedule() (put 'p' to sleep)
3060 	 *   LOCK rq->lock			  smp_rmb();
3061 	 *   smp_mb__after_spinlock();
3062 	 *   STORE p->on_rq = 0			  LOAD p->on_cpu
3063 	 *
3064 	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
3065 	 * __schedule().  See the comment for smp_mb__after_spinlock().
3066 	 *
3067 	 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
3068 	 * schedule()'s deactivate_task() has 'happened' and p will no longer
3069 	 * care about it's own p->state. See the comment in __schedule().
3070 	 */
3071 	smp_acquire__after_ctrl_dep();
3072 
3073 	walt_try_to_wake_up(p);
3074 
3075 	/*
3076 	 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
3077 	 * == 0), which means we need to do an enqueue, change p->state to
3078 	 * TASK_WAKING such that we can unlock p->pi_lock before doing the
3079 	 * enqueue, such as ttwu_queue_wakelist().
3080 	 */
3081 	p->state = TASK_WAKING;
3082 
3083 	/*
3084 	 * If the owning (remote) CPU is still in the middle of schedule() with
3085 	 * this task as prev, considering queueing p on the remote CPUs wake_list
3086 	 * which potentially sends an IPI instead of spinning on p->on_cpu to
3087 	 * let the waker make forward progress. This is safe because IRQs are
3088 	 * disabled and the IPI will deliver after on_cpu is cleared.
3089 	 *
3090 	 * Ensure we load task_cpu(p) after p->on_cpu:
3091 	 *
3092 	 * set_task_cpu(p, cpu);
3093 	 *   STORE p->cpu = @cpu
3094 	 * __schedule() (switch to task 'p')
3095 	 *   LOCK rq->lock
3096 	 *   smp_mb__after_spin_lock()		smp_cond_load_acquire(&p->on_cpu)
3097 	 *   STORE p->on_cpu = 1		LOAD p->cpu
3098 	 *
3099 	 * to ensure we observe the correct CPU on which the task is currently
3100 	 * scheduling.
3101 	 */
3102 	if (smp_load_acquire(&p->on_cpu) &&
3103 	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU))
3104 		goto unlock;
3105 
3106 	/*
3107 	 * If the owning (remote) CPU is still in the middle of schedule() with
3108 	 * this task as prev, wait until its done referencing the task.
3109 	 *
3110 	 * Pairs with the smp_store_release() in finish_task().
3111 	 *
3112 	 * This ensures that tasks getting woken will be fully ordered against
3113 	 * their previous state and preserve Program Order.
3114 	 */
3115 	smp_cond_load_acquire(&p->on_cpu, !VAL);
3116 
3117 	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
3118 	if (task_cpu(p) != cpu) {
3119 		if (p->in_iowait) {
3120 			delayacct_blkio_end(p);
3121 			atomic_dec(&task_rq(p)->nr_iowait);
3122 		}
3123 
3124 		wake_flags |= WF_MIGRATED;
3125 		psi_ttwu_dequeue(p);
3126 		set_task_cpu(p, cpu);
3127 	}
3128 #else
3129 	cpu = task_cpu(p);
3130 #endif /* CONFIG_SMP */
3131 
3132 	ttwu_queue(p, cpu, wake_flags);
3133 unlock:
3134 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3135 out:
3136 	if (success)
3137 		ttwu_stat(p, task_cpu(p), wake_flags);
3138 	preempt_enable();
3139 
3140 	return success;
3141 }
3142 
3143 /**
3144  * try_invoke_on_locked_down_task - Invoke a function on task in fixed state
3145  * @p: Process for which the function is to be invoked, can be @current.
3146  * @func: Function to invoke.
3147  * @arg: Argument to function.
3148  *
3149  * If the specified task can be quickly locked into a definite state
3150  * (either sleeping or on a given runqueue), arrange to keep it in that
3151  * state while invoking @func(@arg).  This function can use ->on_rq and
3152  * task_curr() to work out what the state is, if required.  Given that
3153  * @func can be invoked with a runqueue lock held, it had better be quite
3154  * lightweight.
3155  *
3156  * Returns:
3157  *	@false if the task slipped out from under the locks.
3158  *	@true if the task was locked onto a runqueue or is sleeping.
3159  *		However, @func can override this by returning @false.
3160  */
try_invoke_on_locked_down_task(struct task_struct * p,bool (* func)(struct task_struct * t,void * arg),void * arg)3161 bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg)
3162 {
3163 	struct rq_flags rf;
3164 	bool ret = false;
3165 	struct rq *rq;
3166 
3167 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
3168 	if (p->on_rq) {
3169 		rq = __task_rq_lock(p, &rf);
3170 		if (task_rq(p) == rq)
3171 			ret = func(p, arg);
3172 		rq_unlock(rq, &rf);
3173 	} else {
3174 		switch (p->state) {
3175 		case TASK_RUNNING:
3176 		case TASK_WAKING:
3177 			break;
3178 		default:
3179 			smp_rmb(); // See smp_rmb() comment in try_to_wake_up().
3180 			if (!p->on_rq)
3181 				ret = func(p, arg);
3182 		}
3183 	}
3184 	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
3185 	return ret;
3186 }
3187 
3188 /**
3189  * wake_up_process - Wake up a specific process
3190  * @p: The process to be woken up.
3191  *
3192  * Attempt to wake up the nominated process and move it to the set of runnable
3193  * processes.
3194  *
3195  * Return: 1 if the process was woken up, 0 if it was already running.
3196  *
3197  * This function executes a full memory barrier before accessing the task state.
3198  */
wake_up_process(struct task_struct * p)3199 int wake_up_process(struct task_struct *p)
3200 {
3201 	return try_to_wake_up(p, TASK_NORMAL, 0);
3202 }
3203 EXPORT_SYMBOL(wake_up_process);
3204 
wake_up_state(struct task_struct * p,unsigned int state)3205 int wake_up_state(struct task_struct *p, unsigned int state)
3206 {
3207 	return try_to_wake_up(p, state, 0);
3208 }
3209 
3210 /*
3211  * Perform scheduler related setup for a newly forked process p.
3212  * p is forked by current.
3213  *
3214  * __sched_fork() is basic setup used by init_idle() too:
3215  */
__sched_fork(unsigned long clone_flags,struct task_struct * p)3216 static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
3217 {
3218 	p->on_rq			= 0;
3219 
3220 	p->se.on_rq			= 0;
3221 	p->se.exec_start		= 0;
3222 	p->se.sum_exec_runtime		= 0;
3223 	p->se.prev_sum_exec_runtime	= 0;
3224 	p->se.nr_migrations		= 0;
3225 	p->se.vruntime			= 0;
3226 	INIT_LIST_HEAD(&p->se.group_node);
3227 
3228 #ifdef CONFIG_FAIR_GROUP_SCHED
3229 	p->se.cfs_rq			= NULL;
3230 #endif
3231 
3232 #ifdef CONFIG_SCHEDSTATS
3233 	/* Even if schedstat is disabled, there should not be garbage */
3234 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
3235 #endif
3236 
3237 	RB_CLEAR_NODE(&p->dl.rb_node);
3238 	init_dl_task_timer(&p->dl);
3239 	init_dl_inactive_task_timer(&p->dl);
3240 	__dl_clear_params(p);
3241 
3242 	INIT_LIST_HEAD(&p->rt.run_list);
3243 	p->rt.timeout		= 0;
3244 	p->rt.time_slice	= sched_rr_timeslice;
3245 	p->rt.on_rq		= 0;
3246 	p->rt.on_list		= 0;
3247 
3248 #ifdef CONFIG_PREEMPT_NOTIFIERS
3249 	INIT_HLIST_HEAD(&p->preempt_notifiers);
3250 #endif
3251 
3252 #ifdef CONFIG_COMPACTION
3253 	p->capture_control = NULL;
3254 #endif
3255 	init_numa_balancing(clone_flags, p);
3256 #ifdef CONFIG_SMP
3257 	p->wake_entry.u_flags = CSD_TYPE_TTWU;
3258 #endif
3259 #ifdef CONFIG_SCHED_RTG
3260 	p->rtg_depth = 0;
3261 #endif
3262 }
3263 
3264 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
3265 
3266 #ifdef CONFIG_NUMA_BALANCING
3267 
set_numabalancing_state(bool enabled)3268 void set_numabalancing_state(bool enabled)
3269 {
3270 	if (enabled)
3271 		static_branch_enable(&sched_numa_balancing);
3272 	else
3273 		static_branch_disable(&sched_numa_balancing);
3274 }
3275 
3276 #ifdef CONFIG_PROC_SYSCTL
sysctl_numa_balancing(struct ctl_table * table,int write,void * buffer,size_t * lenp,loff_t * ppos)3277 int sysctl_numa_balancing(struct ctl_table *table, int write,
3278 			  void *buffer, size_t *lenp, loff_t *ppos)
3279 {
3280 	struct ctl_table t;
3281 	int err;
3282 	int state = static_branch_likely(&sched_numa_balancing);
3283 
3284 	if (write && !capable(CAP_SYS_ADMIN))
3285 		return -EPERM;
3286 
3287 	t = *table;
3288 	t.data = &state;
3289 	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
3290 	if (err < 0)
3291 		return err;
3292 	if (write)
3293 		set_numabalancing_state(state);
3294 	return err;
3295 }
3296 #endif
3297 #endif
3298 
3299 #ifdef CONFIG_SCHEDSTATS
3300 
3301 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
3302 static bool __initdata __sched_schedstats = false;
3303 
set_schedstats(bool enabled)3304 static void set_schedstats(bool enabled)
3305 {
3306 	if (enabled)
3307 		static_branch_enable(&sched_schedstats);
3308 	else
3309 		static_branch_disable(&sched_schedstats);
3310 }
3311 
force_schedstat_enabled(void)3312 void force_schedstat_enabled(void)
3313 {
3314 	if (!schedstat_enabled()) {
3315 		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
3316 		static_branch_enable(&sched_schedstats);
3317 	}
3318 }
3319 
setup_schedstats(char * str)3320 static int __init setup_schedstats(char *str)
3321 {
3322 	int ret = 0;
3323 	if (!str)
3324 		goto out;
3325 
3326 	/*
3327 	 * This code is called before jump labels have been set up, so we can't
3328 	 * change the static branch directly just yet.  Instead set a temporary
3329 	 * variable so init_schedstats() can do it later.
3330 	 */
3331 	if (!strcmp(str, "enable")) {
3332 		__sched_schedstats = true;
3333 		ret = 1;
3334 	} else if (!strcmp(str, "disable")) {
3335 		__sched_schedstats = false;
3336 		ret = 1;
3337 	}
3338 out:
3339 	if (!ret)
3340 		pr_warn("Unable to parse schedstats=\n");
3341 
3342 	return ret;
3343 }
3344 __setup("schedstats=", setup_schedstats);
3345 
init_schedstats(void)3346 static void __init init_schedstats(void)
3347 {
3348 	set_schedstats(__sched_schedstats);
3349 }
3350 
3351 #ifdef CONFIG_PROC_SYSCTL
sysctl_schedstats(struct ctl_table * table,int write,void * buffer,size_t * lenp,loff_t * ppos)3352 int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
3353 		size_t *lenp, loff_t *ppos)
3354 {
3355 	struct ctl_table t;
3356 	int err;
3357 	int state = static_branch_likely(&sched_schedstats);
3358 
3359 	if (write && !capable(CAP_SYS_ADMIN))
3360 		return -EPERM;
3361 
3362 	t = *table;
3363 	t.data = &state;
3364 	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
3365 	if (err < 0)
3366 		return err;
3367 	if (write)
3368 		set_schedstats(state);
3369 	return err;
3370 }
3371 #endif /* CONFIG_PROC_SYSCTL */
3372 #else  /* !CONFIG_SCHEDSTATS */
init_schedstats(void)3373 static inline void init_schedstats(void) {}
3374 #endif /* CONFIG_SCHEDSTATS */
3375 
3376 /*
3377  * fork()/clone()-time setup:
3378  */
sched_fork(unsigned long clone_flags,struct task_struct * p)3379 int sched_fork(unsigned long clone_flags, struct task_struct *p)
3380 {
3381 	init_new_task_load(p);
3382 	__sched_fork(clone_flags, p);
3383 	/*
3384 	 * We mark the process as NEW here. This guarantees that
3385 	 * nobody will actually run it, and a signal or other external
3386 	 * event cannot wake it up and insert it on the runqueue either.
3387 	 */
3388 	p->state = TASK_NEW;
3389 
3390 	/*
3391 	 * Make sure we do not leak PI boosting priority to the child.
3392 	 */
3393 	p->prio = current->normal_prio;
3394 
3395 #ifdef CONFIG_SCHED_LATENCY_NICE
3396 	/* Propagate the parent's latency requirements to the child as well */
3397 	p->latency_prio = current->latency_prio;
3398 #endif
3399 
3400 	uclamp_fork(p);
3401 
3402 	/*
3403 	 * Revert to default priority/policy on fork if requested.
3404 	 */
3405 	if (unlikely(p->sched_reset_on_fork)) {
3406 		if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
3407 			p->policy = SCHED_NORMAL;
3408 #ifdef CONFIG_SCHED_RTG
3409 			if (current->rtg_depth != 0)
3410 				p->static_prio = current->static_prio;
3411 			else
3412 				p->static_prio = NICE_TO_PRIO(0);
3413 #else
3414 			p->static_prio = NICE_TO_PRIO(0);
3415 #endif
3416 			p->rt_priority = 0;
3417 		} else if (PRIO_TO_NICE(p->static_prio) < 0)
3418 			p->static_prio = NICE_TO_PRIO(0);
3419 
3420 		p->prio = p->normal_prio = p->static_prio;
3421 		set_load_weight(p, false);
3422 
3423 #ifdef CONFIG_SCHED_LATENCY_NICE
3424 		p->latency_prio = NICE_TO_LATENCY(0);
3425 		set_latency_weight(p);
3426 #endif
3427 
3428 		/*
3429 		 * We don't need the reset flag anymore after the fork. It has
3430 		 * fulfilled its duty:
3431 		 */
3432 		p->sched_reset_on_fork = 0;
3433 	}
3434 
3435 	if (dl_prio(p->prio))
3436 		return -EAGAIN;
3437 	else if (rt_prio(p->prio))
3438 		p->sched_class = &rt_sched_class;
3439 	else
3440 		p->sched_class = &fair_sched_class;
3441 
3442 	init_entity_runnable_average(&p->se);
3443 
3444 #ifdef CONFIG_SCHED_INFO
3445 	if (likely(sched_info_on()))
3446 		memset(&p->sched_info, 0, sizeof(p->sched_info));
3447 #endif
3448 #if defined(CONFIG_SMP)
3449 	p->on_cpu = 0;
3450 #endif
3451 	init_task_preempt_count(p);
3452 #ifdef CONFIG_SMP
3453 	plist_node_init(&p->pushable_tasks, MAX_PRIO);
3454 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
3455 #endif
3456 	return 0;
3457 }
3458 
sched_post_fork(struct task_struct * p,struct kernel_clone_args * kargs)3459 void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
3460 {
3461 	unsigned long flags;
3462 #ifdef CONFIG_CGROUP_SCHED
3463 	struct task_group *tg;
3464 #endif
3465 
3466 	raw_spin_lock_irqsave(&p->pi_lock, flags);
3467 #ifdef CONFIG_CGROUP_SCHED
3468 	tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
3469 			  struct task_group, css);
3470 	p->sched_task_group = autogroup_task_group(p, tg);
3471 #endif
3472 	rseq_migrate(p);
3473 	/*
3474 	 * We're setting the CPU for the first time, we don't migrate,
3475 	 * so use __set_task_cpu().
3476 	 */
3477 	__set_task_cpu(p, smp_processor_id());
3478 	if (p->sched_class->task_fork)
3479 		p->sched_class->task_fork(p);
3480 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3481 
3482 	uclamp_post_fork(p);
3483 }
3484 
to_ratio(u64 period,u64 runtime)3485 unsigned long to_ratio(u64 period, u64 runtime)
3486 {
3487 	if (runtime == RUNTIME_INF)
3488 		return BW_UNIT;
3489 
3490 	/*
3491 	 * Doing this here saves a lot of checks in all
3492 	 * the calling paths, and returning zero seems
3493 	 * safe for them anyway.
3494 	 */
3495 	if (period == 0)
3496 		return 0;
3497 
3498 	return div64_u64(runtime << BW_SHIFT, period);
3499 }
3500 
3501 /*
3502  * wake_up_new_task - wake up a newly created task for the first time.
3503  *
3504  * This function will do some initial scheduler statistics housekeeping
3505  * that must be done for every newly created context, then puts the task
3506  * on the runqueue and wakes it.
3507  */
wake_up_new_task(struct task_struct * p)3508 void wake_up_new_task(struct task_struct *p)
3509 {
3510 	struct rq_flags rf;
3511 	struct rq *rq;
3512 
3513 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
3514 	add_new_task_to_grp(p);
3515 
3516 	p->state = TASK_RUNNING;
3517 #ifdef CONFIG_SMP
3518 	/*
3519 	 * Fork balancing, do it here and not earlier because:
3520 	 *  - cpus_ptr can change in the fork path
3521 	 *  - any previously selected CPU might disappear through hotplug
3522 	 *
3523 	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
3524 	 * as we're not fully set-up yet.
3525 	 */
3526 	p->recent_used_cpu = task_cpu(p);
3527 	rseq_migrate(p);
3528 	__set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
3529 #endif
3530 	rq = __task_rq_lock(p, &rf);
3531 	update_rq_clock(rq);
3532 	post_init_entity_util_avg(p);
3533 
3534 	mark_task_starting(p);
3535 
3536 	activate_task(rq, p, ENQUEUE_NOCLOCK);
3537 	trace_sched_wakeup_new(p);
3538 	check_preempt_curr(rq, p, WF_FORK);
3539 #ifdef CONFIG_SMP
3540 	if (p->sched_class->task_woken) {
3541 		/*
3542 		 * Nothing relies on rq->lock after this, so its fine to
3543 		 * drop it.
3544 		 */
3545 		rq_unpin_lock(rq, &rf);
3546 		p->sched_class->task_woken(rq, p);
3547 		rq_repin_lock(rq, &rf);
3548 	}
3549 #endif
3550 	task_rq_unlock(rq, p, &rf);
3551 }
3552 
3553 #ifdef CONFIG_PREEMPT_NOTIFIERS
3554 
3555 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
3556 
preempt_notifier_inc(void)3557 void preempt_notifier_inc(void)
3558 {
3559 	static_branch_inc(&preempt_notifier_key);
3560 }
3561 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
3562 
preempt_notifier_dec(void)3563 void preempt_notifier_dec(void)
3564 {
3565 	static_branch_dec(&preempt_notifier_key);
3566 }
3567 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
3568 
3569 /**
3570  * preempt_notifier_register - tell me when current is being preempted & rescheduled
3571  * @notifier: notifier struct to register
3572  */
preempt_notifier_register(struct preempt_notifier * notifier)3573 void preempt_notifier_register(struct preempt_notifier *notifier)
3574 {
3575 	if (!static_branch_unlikely(&preempt_notifier_key))
3576 		WARN(1, "registering preempt_notifier while notifiers disabled\n");
3577 
3578 	hlist_add_head(&notifier->link, &current->preempt_notifiers);
3579 }
3580 EXPORT_SYMBOL_GPL(preempt_notifier_register);
3581 
3582 /**
3583  * preempt_notifier_unregister - no longer interested in preemption notifications
3584  * @notifier: notifier struct to unregister
3585  *
3586  * This is *not* safe to call from within a preemption notifier.
3587  */
preempt_notifier_unregister(struct preempt_notifier * notifier)3588 void preempt_notifier_unregister(struct preempt_notifier *notifier)
3589 {
3590 	hlist_del(&notifier->link);
3591 }
3592 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
3593 
__fire_sched_in_preempt_notifiers(struct task_struct * curr)3594 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
3595 {
3596 	struct preempt_notifier *notifier;
3597 
3598 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
3599 		notifier->ops->sched_in(notifier, raw_smp_processor_id());
3600 }
3601 
fire_sched_in_preempt_notifiers(struct task_struct * curr)3602 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
3603 {
3604 	if (static_branch_unlikely(&preempt_notifier_key))
3605 		__fire_sched_in_preempt_notifiers(curr);
3606 }
3607 
3608 static void
__fire_sched_out_preempt_notifiers(struct task_struct * curr,struct task_struct * next)3609 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
3610 				   struct task_struct *next)
3611 {
3612 	struct preempt_notifier *notifier;
3613 
3614 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
3615 		notifier->ops->sched_out(notifier, next);
3616 }
3617 
3618 static __always_inline void
fire_sched_out_preempt_notifiers(struct task_struct * curr,struct task_struct * next)3619 fire_sched_out_preempt_notifiers(struct task_struct *curr,
3620 				 struct task_struct *next)
3621 {
3622 	if (static_branch_unlikely(&preempt_notifier_key))
3623 		__fire_sched_out_preempt_notifiers(curr, next);
3624 }
3625 
3626 #else /* !CONFIG_PREEMPT_NOTIFIERS */
3627 
fire_sched_in_preempt_notifiers(struct task_struct * curr)3628 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
3629 {
3630 }
3631 
3632 static inline void
fire_sched_out_preempt_notifiers(struct task_struct * curr,struct task_struct * next)3633 fire_sched_out_preempt_notifiers(struct task_struct *curr,
3634 				 struct task_struct *next)
3635 {
3636 }
3637 
3638 #endif /* CONFIG_PREEMPT_NOTIFIERS */
3639 
prepare_task(struct task_struct * next)3640 static inline void prepare_task(struct task_struct *next)
3641 {
3642 #ifdef CONFIG_SMP
3643 	/*
3644 	 * Claim the task as running, we do this before switching to it
3645 	 * such that any running task will have this set.
3646 	 *
3647 	 * See the ttwu() WF_ON_CPU case and its ordering comment.
3648 	 */
3649 	WRITE_ONCE(next->on_cpu, 1);
3650 #endif
3651 }
3652 
finish_task(struct task_struct * prev)3653 static inline void finish_task(struct task_struct *prev)
3654 {
3655 #ifdef CONFIG_SMP
3656 	/*
3657 	 * This must be the very last reference to @prev from this CPU. After
3658 	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
3659 	 * must ensure this doesn't happen until the switch is completely
3660 	 * finished.
3661 	 *
3662 	 * In particular, the load of prev->state in finish_task_switch() must
3663 	 * happen before this.
3664 	 *
3665 	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
3666 	 */
3667 	smp_store_release(&prev->on_cpu, 0);
3668 #endif
3669 }
3670 
3671 static inline void
prepare_lock_switch(struct rq * rq,struct task_struct * next,struct rq_flags * rf)3672 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
3673 {
3674 	/*
3675 	 * Since the runqueue lock will be released by the next
3676 	 * task (which is an invalid locking op but in the case
3677 	 * of the scheduler it's an obvious special-case), so we
3678 	 * do an early lockdep release here:
3679 	 */
3680 	rq_unpin_lock(rq, rf);
3681 	spin_release(&rq->lock.dep_map, _THIS_IP_);
3682 #ifdef CONFIG_DEBUG_SPINLOCK
3683 	/* this is a valid case when another task releases the spinlock */
3684 	rq->lock.owner = next;
3685 #endif
3686 }
3687 
finish_lock_switch(struct rq * rq)3688 static inline void finish_lock_switch(struct rq *rq)
3689 {
3690 	/*
3691 	 * If we are tracking spinlock dependencies then we have to
3692 	 * fix up the runqueue lock - which gets 'carried over' from
3693 	 * prev into current:
3694 	 */
3695 	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
3696 	raw_spin_unlock_irq(&rq->lock);
3697 }
3698 
3699 /*
3700  * NOP if the arch has not defined these:
3701  */
3702 
3703 #ifndef prepare_arch_switch
3704 # define prepare_arch_switch(next)	do { } while (0)
3705 #endif
3706 
3707 #ifndef finish_arch_post_lock_switch
3708 # define finish_arch_post_lock_switch()	do { } while (0)
3709 #endif
3710 
3711 /**
3712  * prepare_task_switch - prepare to switch tasks
3713  * @rq: the runqueue preparing to switch
3714  * @prev: the current task that is being switched out
3715  * @next: the task we are going to switch to.
3716  *
3717  * This is called with the rq lock held and interrupts off. It must
3718  * be paired with a subsequent finish_task_switch after the context
3719  * switch.
3720  *
3721  * prepare_task_switch sets up locking and calls architecture specific
3722  * hooks.
3723  */
3724 static inline void
prepare_task_switch(struct rq * rq,struct task_struct * prev,struct task_struct * next)3725 prepare_task_switch(struct rq *rq, struct task_struct *prev,
3726 		    struct task_struct *next)
3727 {
3728 	kcov_prepare_switch(prev);
3729 	sched_info_switch(rq, prev, next);
3730 	perf_event_task_sched_out(prev, next);
3731 	rseq_preempt(prev);
3732 	fire_sched_out_preempt_notifiers(prev, next);
3733 	prepare_task(next);
3734 	prepare_arch_switch(next);
3735 }
3736 
3737 /**
3738  * finish_task_switch - clean up after a task-switch
3739  * @prev: the thread we just switched away from.
3740  *
3741  * finish_task_switch must be called after the context switch, paired
3742  * with a prepare_task_switch call before the context switch.
3743  * finish_task_switch will reconcile locking set up by prepare_task_switch,
3744  * and do any other architecture-specific cleanup actions.
3745  *
3746  * Note that we may have delayed dropping an mm in context_switch(). If
3747  * so, we finish that here outside of the runqueue lock. (Doing it
3748  * with the lock held can cause deadlocks; see schedule() for
3749  * details.)
3750  *
3751  * The context switch have flipped the stack from under us and restored the
3752  * local variables which were saved when this task called schedule() in the
3753  * past. prev == current is still correct but we need to recalculate this_rq
3754  * because prev may have moved to another CPU.
3755  */
finish_task_switch(struct task_struct * prev)3756 static struct rq *finish_task_switch(struct task_struct *prev)
3757 	__releases(rq->lock)
3758 {
3759 	struct rq *rq = this_rq();
3760 	struct mm_struct *mm = rq->prev_mm;
3761 	long prev_state;
3762 
3763 	/*
3764 	 * The previous task will have left us with a preempt_count of 2
3765 	 * because it left us after:
3766 	 *
3767 	 *	schedule()
3768 	 *	  preempt_disable();			// 1
3769 	 *	  __schedule()
3770 	 *	    raw_spin_lock_irq(&rq->lock)	// 2
3771 	 *
3772 	 * Also, see FORK_PREEMPT_COUNT.
3773 	 */
3774 	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
3775 		      "corrupted preempt_count: %s/%d/0x%x\n",
3776 		      current->comm, current->pid, preempt_count()))
3777 		preempt_count_set(FORK_PREEMPT_COUNT);
3778 
3779 	rq->prev_mm = NULL;
3780 
3781 	/*
3782 	 * A task struct has one reference for the use as "current".
3783 	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
3784 	 * schedule one last time. The schedule call will never return, and
3785 	 * the scheduled task must drop that reference.
3786 	 *
3787 	 * We must observe prev->state before clearing prev->on_cpu (in
3788 	 * finish_task), otherwise a concurrent wakeup can get prev
3789 	 * running on another CPU and we could rave with its RUNNING -> DEAD
3790 	 * transition, resulting in a double drop.
3791 	 */
3792 	prev_state = prev->state;
3793 	vtime_task_switch(prev);
3794 	perf_event_task_sched_in(prev, current);
3795 	finish_task(prev);
3796 	finish_lock_switch(rq);
3797 	finish_arch_post_lock_switch();
3798 	kcov_finish_switch(current);
3799 
3800 	fire_sched_in_preempt_notifiers(current);
3801 	/*
3802 	 * When switching through a kernel thread, the loop in
3803 	 * membarrier_{private,global}_expedited() may have observed that
3804 	 * kernel thread and not issued an IPI. It is therefore possible to
3805 	 * schedule between user->kernel->user threads without passing though
3806 	 * switch_mm(). Membarrier requires a barrier after storing to
3807 	 * rq->curr, before returning to userspace, so provide them here:
3808 	 *
3809 	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
3810 	 *   provided by mmdrop(),
3811 	 * - a sync_core for SYNC_CORE.
3812 	 */
3813 	if (mm) {
3814 		membarrier_mm_sync_core_before_usermode(mm);
3815 		mmdrop(mm);
3816 	}
3817 	if (unlikely(prev_state == TASK_DEAD)) {
3818 		if (prev->sched_class->task_dead)
3819 			prev->sched_class->task_dead(prev);
3820 
3821 		/*
3822 		 * Remove function-return probe instances associated with this
3823 		 * task and put them back on the free list.
3824 		 */
3825 		kprobe_flush_task(prev);
3826 
3827 		/* Task is done with its stack. */
3828 		put_task_stack(prev);
3829 
3830 		put_task_struct_rcu_user(prev);
3831 	}
3832 
3833 	tick_nohz_task_switch();
3834 	return rq;
3835 }
3836 
3837 #ifdef CONFIG_SMP
3838 
3839 /* rq->lock is NOT held, but preemption is disabled */
__balance_callback(struct rq * rq)3840 static void __balance_callback(struct rq *rq)
3841 {
3842 	struct callback_head *head, *next;
3843 	void (*func)(struct rq *rq);
3844 	unsigned long flags;
3845 
3846 	raw_spin_lock_irqsave(&rq->lock, flags);
3847 	head = rq->balance_callback;
3848 	rq->balance_callback = NULL;
3849 	while (head) {
3850 		func = (void (*)(struct rq *))head->func;
3851 		next = head->next;
3852 		head->next = NULL;
3853 		head = next;
3854 
3855 		func(rq);
3856 	}
3857 	raw_spin_unlock_irqrestore(&rq->lock, flags);
3858 }
3859 
balance_callback(struct rq * rq)3860 static inline void balance_callback(struct rq *rq)
3861 {
3862 	if (unlikely(rq->balance_callback))
3863 		__balance_callback(rq);
3864 }
3865 
3866 #else
3867 
balance_callback(struct rq * rq)3868 static inline void balance_callback(struct rq *rq)
3869 {
3870 }
3871 
3872 #endif
3873 
3874 /**
3875  * schedule_tail - first thing a freshly forked thread must call.
3876  * @prev: the thread we just switched away from.
3877  */
schedule_tail(struct task_struct * prev)3878 asmlinkage __visible void schedule_tail(struct task_struct *prev)
3879 	__releases(rq->lock)
3880 {
3881 	struct rq *rq;
3882 
3883 	/*
3884 	 * New tasks start with FORK_PREEMPT_COUNT, see there and
3885 	 * finish_task_switch() for details.
3886 	 *
3887 	 * finish_task_switch() will drop rq->lock() and lower preempt_count
3888 	 * and the preempt_enable() will end up enabling preemption (on
3889 	 * PREEMPT_COUNT kernels).
3890 	 */
3891 
3892 	rq = finish_task_switch(prev);
3893 	balance_callback(rq);
3894 	preempt_enable();
3895 
3896 	if (current->set_child_tid)
3897 		put_user(task_pid_vnr(current), current->set_child_tid);
3898 
3899 	calculate_sigpending();
3900 }
3901 
3902 /*
3903  * context_switch - switch to the new MM and the new thread's register state.
3904  */
3905 static __always_inline struct rq *
context_switch(struct rq * rq,struct task_struct * prev,struct task_struct * next,struct rq_flags * rf)3906 context_switch(struct rq *rq, struct task_struct *prev,
3907 	       struct task_struct *next, struct rq_flags *rf)
3908 {
3909 	prepare_task_switch(rq, prev, next);
3910 
3911 	/*
3912 	 * For paravirt, this is coupled with an exit in switch_to to
3913 	 * combine the page table reload and the switch backend into
3914 	 * one hypercall.
3915 	 */
3916 	arch_start_context_switch(prev);
3917 
3918 	/*
3919 	 * kernel -> kernel   lazy + transfer active
3920 	 *   user -> kernel   lazy + mmgrab() active
3921 	 *
3922 	 * kernel ->   user   switch + mmdrop() active
3923 	 *   user ->   user   switch
3924 	 */
3925 	if (!next->mm) {                                // to kernel
3926 		enter_lazy_tlb(prev->active_mm, next);
3927 
3928 		next->active_mm = prev->active_mm;
3929 		if (prev->mm)                           // from user
3930 			mmgrab(prev->active_mm);
3931 		else
3932 			prev->active_mm = NULL;
3933 	} else {                                        // to user
3934 		membarrier_switch_mm(rq, prev->active_mm, next->mm);
3935 		/*
3936 		 * sys_membarrier() requires an smp_mb() between setting
3937 		 * rq->curr / membarrier_switch_mm() and returning to userspace.
3938 		 *
3939 		 * The below provides this either through switch_mm(), or in
3940 		 * case 'prev->active_mm == next->mm' through
3941 		 * finish_task_switch()'s mmdrop().
3942 		 */
3943 		switch_mm_irqs_off(prev->active_mm, next->mm, next);
3944 
3945 		if (!prev->mm) {                        // from kernel
3946 			/* will mmdrop() in finish_task_switch(). */
3947 			rq->prev_mm = prev->active_mm;
3948 			prev->active_mm = NULL;
3949 		}
3950 	}
3951 
3952 	rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
3953 
3954 	prepare_lock_switch(rq, next, rf);
3955 
3956 	/* Here we just switch the register state and the stack. */
3957 	switch_to(prev, next, prev);
3958 	barrier();
3959 
3960 	return finish_task_switch(prev);
3961 }
3962 
3963 /*
3964  * nr_running and nr_context_switches:
3965  *
3966  * externally visible scheduler statistics: current number of runnable
3967  * threads, total number of context switches performed since bootup.
3968  */
nr_running(void)3969 unsigned long nr_running(void)
3970 {
3971 	unsigned long i, sum = 0;
3972 
3973 	for_each_online_cpu(i)
3974 		sum += cpu_rq(i)->nr_running;
3975 
3976 	return sum;
3977 }
3978 
3979 /*
3980  * Check if only the current task is running on the CPU.
3981  *
3982  * Caution: this function does not check that the caller has disabled
3983  * preemption, thus the result might have a time-of-check-to-time-of-use
3984  * race.  The caller is responsible to use it correctly, for example:
3985  *
3986  * - from a non-preemptible section (of course)
3987  *
3988  * - from a thread that is bound to a single CPU
3989  *
3990  * - in a loop with very short iterations (e.g. a polling loop)
3991  */
single_task_running(void)3992 bool single_task_running(void)
3993 {
3994 	return raw_rq()->nr_running == 1;
3995 }
3996 EXPORT_SYMBOL(single_task_running);
3997 
nr_context_switches(void)3998 unsigned long long nr_context_switches(void)
3999 {
4000 	int i;
4001 	unsigned long long sum = 0;
4002 
4003 	for_each_possible_cpu(i)
4004 		sum += cpu_rq(i)->nr_switches;
4005 
4006 	return sum;
4007 }
4008 
4009 /*
4010  * Consumers of these two interfaces, like for example the cpuidle menu
4011  * governor, are using nonsensical data. Preferring shallow idle state selection
4012  * for a CPU that has IO-wait which might not even end up running the task when
4013  * it does become runnable.
4014  */
4015 
nr_iowait_cpu(int cpu)4016 unsigned long nr_iowait_cpu(int cpu)
4017 {
4018 	return atomic_read(&cpu_rq(cpu)->nr_iowait);
4019 }
4020 
4021 /*
4022  * IO-wait accounting, and how its mostly bollocks (on SMP).
4023  *
4024  * The idea behind IO-wait account is to account the idle time that we could
4025  * have spend running if it were not for IO. That is, if we were to improve the
4026  * storage performance, we'd have a proportional reduction in IO-wait time.
4027  *
4028  * This all works nicely on UP, where, when a task blocks on IO, we account
4029  * idle time as IO-wait, because if the storage were faster, it could've been
4030  * running and we'd not be idle.
4031  *
4032  * This has been extended to SMP, by doing the same for each CPU. This however
4033  * is broken.
4034  *
4035  * Imagine for instance the case where two tasks block on one CPU, only the one
4036  * CPU will have IO-wait accounted, while the other has regular idle. Even
4037  * though, if the storage were faster, both could've ran at the same time,
4038  * utilising both CPUs.
4039  *
4040  * This means, that when looking globally, the current IO-wait accounting on
4041  * SMP is a lower bound, by reason of under accounting.
4042  *
4043  * Worse, since the numbers are provided per CPU, they are sometimes
4044  * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
4045  * associated with any one particular CPU, it can wake to another CPU than it
4046  * blocked on. This means the per CPU IO-wait number is meaningless.
4047  *
4048  * Task CPU affinities can make all that even more 'interesting'.
4049  */
4050 
nr_iowait(void)4051 unsigned long nr_iowait(void)
4052 {
4053 	unsigned long i, sum = 0;
4054 
4055 	for_each_possible_cpu(i)
4056 		sum += nr_iowait_cpu(i);
4057 
4058 	return sum;
4059 }
4060 
4061 #ifdef CONFIG_SMP
4062 
4063 /*
4064  * sched_exec - execve() is a valuable balancing opportunity, because at
4065  * this point the task has the smallest effective memory and cache footprint.
4066  */
sched_exec(void)4067 void sched_exec(void)
4068 {
4069 	struct task_struct *p = current;
4070 	unsigned long flags;
4071 	int dest_cpu;
4072 
4073 	raw_spin_lock_irqsave(&p->pi_lock, flags);
4074 	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
4075 	if (dest_cpu == smp_processor_id())
4076 		goto unlock;
4077 
4078 	if (likely(cpu_active(dest_cpu) && likely(!cpu_isolated(dest_cpu)))) {
4079 		struct migration_arg arg = { p, dest_cpu };
4080 
4081 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4082 		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
4083 		return;
4084 	}
4085 unlock:
4086 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4087 }
4088 
4089 #endif
4090 
4091 DEFINE_PER_CPU(struct kernel_stat, kstat);
4092 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
4093 
4094 EXPORT_PER_CPU_SYMBOL(kstat);
4095 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
4096 
4097 /*
4098  * The function fair_sched_class.update_curr accesses the struct curr
4099  * and its field curr->exec_start; when called from task_sched_runtime(),
4100  * we observe a high rate of cache misses in practice.
4101  * Prefetching this data results in improved performance.
4102  */
prefetch_curr_exec_start(struct task_struct * p)4103 static inline void prefetch_curr_exec_start(struct task_struct *p)
4104 {
4105 #ifdef CONFIG_FAIR_GROUP_SCHED
4106 	struct sched_entity *curr = (&p->se)->cfs_rq->curr;
4107 #else
4108 	struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
4109 #endif
4110 	prefetch(curr);
4111 	prefetch(&curr->exec_start);
4112 }
4113 
4114 /*
4115  * Return accounted runtime for the task.
4116  * In case the task is currently running, return the runtime plus current's
4117  * pending runtime that have not been accounted yet.
4118  */
task_sched_runtime(struct task_struct * p)4119 unsigned long long task_sched_runtime(struct task_struct *p)
4120 {
4121 	struct rq_flags rf;
4122 	struct rq *rq;
4123 	u64 ns;
4124 
4125 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
4126 	/*
4127 	 * 64-bit doesn't need locks to atomically read a 64-bit value.
4128 	 * So we have a optimization chance when the task's delta_exec is 0.
4129 	 * Reading ->on_cpu is racy, but this is ok.
4130 	 *
4131 	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
4132 	 * If we race with it entering CPU, unaccounted time is 0. This is
4133 	 * indistinguishable from the read occurring a few cycles earlier.
4134 	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
4135 	 * been accounted, so we're correct here as well.
4136 	 */
4137 	if (!p->on_cpu || !task_on_rq_queued(p))
4138 		return p->se.sum_exec_runtime;
4139 #endif
4140 
4141 	rq = task_rq_lock(p, &rf);
4142 	/*
4143 	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
4144 	 * project cycles that may never be accounted to this
4145 	 * thread, breaking clock_gettime().
4146 	 */
4147 	if (task_current(rq, p) && task_on_rq_queued(p)) {
4148 		prefetch_curr_exec_start(p);
4149 		update_rq_clock(rq);
4150 		p->sched_class->update_curr(rq);
4151 	}
4152 	ns = p->se.sum_exec_runtime;
4153 	task_rq_unlock(rq, p, &rf);
4154 
4155 	return ns;
4156 }
4157 
4158 /*
4159  * This function gets called by the timer code, with HZ frequency.
4160  * We call it with interrupts disabled.
4161  */
scheduler_tick(void)4162 void scheduler_tick(void)
4163 {
4164 	int cpu = smp_processor_id();
4165 	struct rq *rq = cpu_rq(cpu);
4166 	struct task_struct *curr = rq->curr;
4167 	struct rq_flags rf;
4168 	u64 wallclock;
4169 	unsigned long thermal_pressure;
4170 
4171 	arch_scale_freq_tick();
4172 	sched_clock_tick();
4173 
4174 	rq_lock(rq, &rf);
4175 
4176 	set_window_start(rq);
4177 	wallclock = sched_ktime_clock();
4178 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
4179 	update_rq_clock(rq);
4180 	thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
4181 	update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
4182 	curr->sched_class->task_tick(rq, curr, 0);
4183 	calc_global_load_tick(rq);
4184 	psi_task_tick(rq);
4185 
4186 	rq_unlock(rq, &rf);
4187 
4188 #ifdef CONFIG_SCHED_RTG
4189 	sched_update_rtg_tick(curr);
4190 #endif
4191 	perf_event_task_tick();
4192 
4193 #ifdef CONFIG_SMP
4194 	rq->idle_balance = idle_cpu(cpu);
4195 	trigger_load_balance(rq);
4196 
4197 #ifdef CONFIG_SCHED_EAS
4198 	if (curr->sched_class->check_for_migration)
4199 		curr->sched_class->check_for_migration(rq, curr);
4200 #endif
4201 #endif
4202 }
4203 
4204 #ifdef CONFIG_NO_HZ_FULL
4205 
4206 struct tick_work {
4207 	int			cpu;
4208 	atomic_t		state;
4209 	struct delayed_work	work;
4210 };
4211 /* Values for ->state, see diagram below. */
4212 #define TICK_SCHED_REMOTE_OFFLINE	0
4213 #define TICK_SCHED_REMOTE_OFFLINING	1
4214 #define TICK_SCHED_REMOTE_RUNNING	2
4215 
4216 /*
4217  * State diagram for ->state:
4218  *
4219  *
4220  *          TICK_SCHED_REMOTE_OFFLINE
4221  *                    |   ^
4222  *                    |   |
4223  *                    |   | sched_tick_remote()
4224  *                    |   |
4225  *                    |   |
4226  *                    +--TICK_SCHED_REMOTE_OFFLINING
4227  *                    |   ^
4228  *                    |   |
4229  * sched_tick_start() |   | sched_tick_stop()
4230  *                    |   |
4231  *                    V   |
4232  *          TICK_SCHED_REMOTE_RUNNING
4233  *
4234  *
4235  * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
4236  * and sched_tick_start() are happy to leave the state in RUNNING.
4237  */
4238 
4239 static struct tick_work __percpu *tick_work_cpu;
4240 
sched_tick_remote(struct work_struct * work)4241 static void sched_tick_remote(struct work_struct *work)
4242 {
4243 	struct delayed_work *dwork = to_delayed_work(work);
4244 	struct tick_work *twork = container_of(dwork, struct tick_work, work);
4245 	int cpu = twork->cpu;
4246 	struct rq *rq = cpu_rq(cpu);
4247 	struct task_struct *curr;
4248 	struct rq_flags rf;
4249 	u64 delta;
4250 	int os;
4251 
4252 	/*
4253 	 * Handle the tick only if it appears the remote CPU is running in full
4254 	 * dynticks mode. The check is racy by nature, but missing a tick or
4255 	 * having one too much is no big deal because the scheduler tick updates
4256 	 * statistics and checks timeslices in a time-independent way, regardless
4257 	 * of when exactly it is running.
4258 	 */
4259 	if (!tick_nohz_tick_stopped_cpu(cpu))
4260 		goto out_requeue;
4261 
4262 	rq_lock_irq(rq, &rf);
4263 	curr = rq->curr;
4264 	if (cpu_is_offline(cpu))
4265 		goto out_unlock;
4266 
4267 	update_rq_clock(rq);
4268 
4269 	if (!is_idle_task(curr)) {
4270 		/*
4271 		 * Make sure the next tick runs within a reasonable
4272 		 * amount of time.
4273 		 */
4274 		delta = rq_clock_task(rq) - curr->se.exec_start;
4275 		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
4276 	}
4277 	curr->sched_class->task_tick(rq, curr, 0);
4278 
4279 	calc_load_nohz_remote(rq);
4280 out_unlock:
4281 	rq_unlock_irq(rq, &rf);
4282 out_requeue:
4283 
4284 	/*
4285 	 * Run the remote tick once per second (1Hz). This arbitrary
4286 	 * frequency is large enough to avoid overload but short enough
4287 	 * to keep scheduler internal stats reasonably up to date.  But
4288 	 * first update state to reflect hotplug activity if required.
4289 	 */
4290 	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
4291 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
4292 	if (os == TICK_SCHED_REMOTE_RUNNING)
4293 		queue_delayed_work(system_unbound_wq, dwork, HZ);
4294 }
4295 
sched_tick_start(int cpu)4296 static void sched_tick_start(int cpu)
4297 {
4298 	int os;
4299 	struct tick_work *twork;
4300 
4301 	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
4302 		return;
4303 
4304 	WARN_ON_ONCE(!tick_work_cpu);
4305 
4306 	twork = per_cpu_ptr(tick_work_cpu, cpu);
4307 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
4308 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
4309 	if (os == TICK_SCHED_REMOTE_OFFLINE) {
4310 		twork->cpu = cpu;
4311 		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
4312 		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
4313 	}
4314 }
4315 
4316 #ifdef CONFIG_HOTPLUG_CPU
sched_tick_stop(int cpu)4317 static void sched_tick_stop(int cpu)
4318 {
4319 	struct tick_work *twork;
4320 	int os;
4321 
4322 	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
4323 		return;
4324 
4325 	WARN_ON_ONCE(!tick_work_cpu);
4326 
4327 	twork = per_cpu_ptr(tick_work_cpu, cpu);
4328 	/* There cannot be competing actions, but don't rely on stop-machine. */
4329 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
4330 	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
4331 	/* Don't cancel, as this would mess up the state machine. */
4332 }
4333 #endif /* CONFIG_HOTPLUG_CPU */
4334 
sched_tick_offload_init(void)4335 int __init sched_tick_offload_init(void)
4336 {
4337 	tick_work_cpu = alloc_percpu(struct tick_work);
4338 	BUG_ON(!tick_work_cpu);
4339 	return 0;
4340 }
4341 
4342 #else /* !CONFIG_NO_HZ_FULL */
sched_tick_start(int cpu)4343 static inline void sched_tick_start(int cpu) { }
sched_tick_stop(int cpu)4344 static inline void sched_tick_stop(int cpu) { }
4345 #endif
4346 
4347 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
4348 				defined(CONFIG_TRACE_PREEMPT_TOGGLE))
4349 /*
4350  * If the value passed in is equal to the current preempt count
4351  * then we just disabled preemption. Start timing the latency.
4352  */
preempt_latency_start(int val)4353 static inline void preempt_latency_start(int val)
4354 {
4355 	if (preempt_count() == val) {
4356 		unsigned long ip = get_lock_parent_ip();
4357 #ifdef CONFIG_DEBUG_PREEMPT
4358 		current->preempt_disable_ip = ip;
4359 #endif
4360 		trace_preempt_off(CALLER_ADDR0, ip);
4361 	}
4362 }
4363 
preempt_count_add(int val)4364 void preempt_count_add(int val)
4365 {
4366 #ifdef CONFIG_DEBUG_PREEMPT
4367 	/*
4368 	 * Underflow?
4369 	 */
4370 	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
4371 		return;
4372 #endif
4373 	__preempt_count_add(val);
4374 #ifdef CONFIG_DEBUG_PREEMPT
4375 	/*
4376 	 * Spinlock count overflowing soon?
4377 	 */
4378 	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
4379 				PREEMPT_MASK - 10);
4380 #endif
4381 	preempt_latency_start(val);
4382 }
4383 EXPORT_SYMBOL(preempt_count_add);
4384 NOKPROBE_SYMBOL(preempt_count_add);
4385 
4386 /*
4387  * If the value passed in equals to the current preempt count
4388  * then we just enabled preemption. Stop timing the latency.
4389  */
preempt_latency_stop(int val)4390 static inline void preempt_latency_stop(int val)
4391 {
4392 	if (preempt_count() == val)
4393 		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
4394 }
4395 
preempt_count_sub(int val)4396 void preempt_count_sub(int val)
4397 {
4398 #ifdef CONFIG_DEBUG_PREEMPT
4399 	/*
4400 	 * Underflow?
4401 	 */
4402 	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
4403 		return;
4404 	/*
4405 	 * Is the spinlock portion underflowing?
4406 	 */
4407 	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
4408 			!(preempt_count() & PREEMPT_MASK)))
4409 		return;
4410 #endif
4411 
4412 	preempt_latency_stop(val);
4413 	__preempt_count_sub(val);
4414 }
4415 EXPORT_SYMBOL(preempt_count_sub);
4416 NOKPROBE_SYMBOL(preempt_count_sub);
4417 
4418 #else
preempt_latency_start(int val)4419 static inline void preempt_latency_start(int val) { }
preempt_latency_stop(int val)4420 static inline void preempt_latency_stop(int val) { }
4421 #endif
4422 
get_preempt_disable_ip(struct task_struct * p)4423 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
4424 {
4425 #ifdef CONFIG_DEBUG_PREEMPT
4426 	return p->preempt_disable_ip;
4427 #else
4428 	return 0;
4429 #endif
4430 }
4431 
4432 /*
4433  * Print scheduling while atomic bug:
4434  */
__schedule_bug(struct task_struct * prev)4435 static noinline void __schedule_bug(struct task_struct *prev)
4436 {
4437 	/* Save this before calling printk(), since that will clobber it */
4438 	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
4439 
4440 	if (oops_in_progress)
4441 		return;
4442 
4443 	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
4444 		prev->comm, prev->pid, preempt_count());
4445 
4446 	debug_show_held_locks(prev);
4447 	print_modules();
4448 	if (irqs_disabled())
4449 		print_irqtrace_events(prev);
4450 	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
4451 	    && in_atomic_preempt_off()) {
4452 		pr_err("Preemption disabled at:");
4453 		print_ip_sym(KERN_ERR, preempt_disable_ip);
4454 	}
4455 	if (panic_on_warn)
4456 		panic("scheduling while atomic\n");
4457 
4458 	dump_stack();
4459 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
4460 }
4461 
4462 /*
4463  * Various schedule()-time debugging checks and statistics:
4464  */
schedule_debug(struct task_struct * prev,bool preempt)4465 static inline void schedule_debug(struct task_struct *prev, bool preempt)
4466 {
4467 #ifdef CONFIG_SCHED_STACK_END_CHECK
4468 	if (task_stack_end_corrupted(prev))
4469 		panic("corrupted stack end detected inside scheduler\n");
4470 
4471 	if (task_scs_end_corrupted(prev))
4472 		panic("corrupted shadow stack detected inside scheduler\n");
4473 #endif
4474 
4475 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
4476 	if (!preempt && prev->state && prev->non_block_count) {
4477 		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
4478 			prev->comm, prev->pid, prev->non_block_count);
4479 		dump_stack();
4480 		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
4481 	}
4482 #endif
4483 
4484 	if (unlikely(in_atomic_preempt_off())) {
4485 		__schedule_bug(prev);
4486 		preempt_count_set(PREEMPT_DISABLED);
4487 	}
4488 	rcu_sleep_check();
4489 
4490 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
4491 
4492 	schedstat_inc(this_rq()->sched_count);
4493 }
4494 
put_prev_task_balance(struct rq * rq,struct task_struct * prev,struct rq_flags * rf)4495 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
4496 				  struct rq_flags *rf)
4497 {
4498 #ifdef CONFIG_SMP
4499 	const struct sched_class *class;
4500 	/*
4501 	 * We must do the balancing pass before put_prev_task(), such
4502 	 * that when we release the rq->lock the task is in the same
4503 	 * state as before we took rq->lock.
4504 	 *
4505 	 * We can terminate the balance pass as soon as we know there is
4506 	 * a runnable task of @class priority or higher.
4507 	 */
4508 	for_class_range(class, prev->sched_class, &idle_sched_class) {
4509 		if (class->balance(rq, prev, rf))
4510 			break;
4511 	}
4512 #endif
4513 
4514 	put_prev_task(rq, prev);
4515 }
4516 
4517 /*
4518  * Pick up the highest-prio task:
4519  */
4520 static inline struct task_struct *
pick_next_task(struct rq * rq,struct task_struct * prev,struct rq_flags * rf)4521 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
4522 {
4523 	const struct sched_class *class;
4524 	struct task_struct *p;
4525 
4526 	/*
4527 	 * Optimization: we know that if all tasks are in the fair class we can
4528 	 * call that function directly, but only if the @prev task wasn't of a
4529 	 * higher scheduling class, because otherwise those loose the
4530 	 * opportunity to pull in more work from other CPUs.
4531 	 */
4532 	if (likely(prev->sched_class <= &fair_sched_class &&
4533 		   rq->nr_running == rq->cfs.h_nr_running)) {
4534 
4535 		p = pick_next_task_fair(rq, prev, rf);
4536 		if (unlikely(p == RETRY_TASK))
4537 			goto restart;
4538 
4539 		/* Assumes fair_sched_class->next == idle_sched_class */
4540 		if (!p) {
4541 			put_prev_task(rq, prev);
4542 			p = pick_next_task_idle(rq);
4543 		}
4544 
4545 		return p;
4546 	}
4547 
4548 restart:
4549 	put_prev_task_balance(rq, prev, rf);
4550 
4551 	for_each_class(class) {
4552 		p = class->pick_next_task(rq);
4553 		if (p)
4554 			return p;
4555 	}
4556 
4557 	/* The idle class should always have a runnable task: */
4558 	BUG();
4559 }
4560 
4561 /*
4562  * __schedule() is the main scheduler function.
4563  *
4564  * The main means of driving the scheduler and thus entering this function are:
4565  *
4566  *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
4567  *
4568  *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
4569  *      paths. For example, see arch/x86/entry_64.S.
4570  *
4571  *      To drive preemption between tasks, the scheduler sets the flag in timer
4572  *      interrupt handler scheduler_tick().
4573  *
4574  *   3. Wakeups don't really cause entry into schedule(). They add a
4575  *      task to the run-queue and that's it.
4576  *
4577  *      Now, if the new task added to the run-queue preempts the current
4578  *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
4579  *      called on the nearest possible occasion:
4580  *
4581  *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
4582  *
4583  *         - in syscall or exception context, at the next outmost
4584  *           preempt_enable(). (this might be as soon as the wake_up()'s
4585  *           spin_unlock()!)
4586  *
4587  *         - in IRQ context, return from interrupt-handler to
4588  *           preemptible context
4589  *
4590  *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
4591  *         then at the next:
4592  *
4593  *          - cond_resched() call
4594  *          - explicit schedule() call
4595  *          - return from syscall or exception to user-space
4596  *          - return from interrupt-handler to user-space
4597  *
4598  * WARNING: must be called with preemption disabled!
4599  */
__schedule(bool preempt)4600 static void __sched notrace __schedule(bool preempt)
4601 {
4602 	struct task_struct *prev, *next;
4603 	unsigned long *switch_count;
4604 	unsigned long prev_state;
4605 	struct rq_flags rf;
4606 	struct rq *rq;
4607 	int cpu;
4608 	u64 wallclock;
4609 
4610 	cpu = smp_processor_id();
4611 	rq = cpu_rq(cpu);
4612 	prev = rq->curr;
4613 
4614 	schedule_debug(prev, preempt);
4615 
4616 	if (sched_feat(HRTICK))
4617 		hrtick_clear(rq);
4618 
4619 	local_irq_disable();
4620 	rcu_note_context_switch(preempt);
4621 
4622 	/*
4623 	 * Make sure that signal_pending_state()->signal_pending() below
4624 	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
4625 	 * done by the caller to avoid the race with signal_wake_up():
4626 	 *
4627 	 * __set_current_state(@state)		signal_wake_up()
4628 	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
4629 	 *					  wake_up_state(p, state)
4630 	 *   LOCK rq->lock			    LOCK p->pi_state
4631 	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
4632 	 *     if (signal_pending_state())	    if (p->state & @state)
4633 	 *
4634 	 * Also, the membarrier system call requires a full memory barrier
4635 	 * after coming from user-space, before storing to rq->curr.
4636 	 */
4637 	rq_lock(rq, &rf);
4638 	smp_mb__after_spinlock();
4639 
4640 	/* Promote REQ to ACT */
4641 	rq->clock_update_flags <<= 1;
4642 	update_rq_clock(rq);
4643 
4644 	switch_count = &prev->nivcsw;
4645 
4646 	/*
4647 	 * We must load prev->state once (task_struct::state is volatile), such
4648 	 * that:
4649 	 *
4650 	 *  - we form a control dependency vs deactivate_task() below.
4651 	 *  - ptrace_{,un}freeze_traced() can change ->state underneath us.
4652 	 */
4653 	prev_state = prev->state;
4654 	if (!preempt && prev_state) {
4655 		if (signal_pending_state(prev_state, prev)) {
4656 			prev->state = TASK_RUNNING;
4657 		} else {
4658 			prev->sched_contributes_to_load =
4659 				(prev_state & TASK_UNINTERRUPTIBLE) &&
4660 				!(prev_state & TASK_NOLOAD) &&
4661 				!(prev->flags & PF_FROZEN);
4662 
4663 			if (prev->sched_contributes_to_load)
4664 				rq->nr_uninterruptible++;
4665 
4666 			/*
4667 			 * __schedule()			ttwu()
4668 			 *   prev_state = prev->state;    if (p->on_rq && ...)
4669 			 *   if (prev_state)		    goto out;
4670 			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
4671 			 *				  p->state = TASK_WAKING
4672 			 *
4673 			 * Where __schedule() and ttwu() have matching control dependencies.
4674 			 *
4675 			 * After this, schedule() must not care about p->state any more.
4676 			 */
4677 			deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
4678 
4679 			if (prev->in_iowait) {
4680 				atomic_inc(&rq->nr_iowait);
4681 				delayacct_blkio_start();
4682 			}
4683 		}
4684 		switch_count = &prev->nvcsw;
4685 	}
4686 
4687 	next = pick_next_task(rq, prev, &rf);
4688 	clear_tsk_need_resched(prev);
4689 	clear_preempt_need_resched();
4690 
4691 	wallclock = sched_ktime_clock();
4692 	if (likely(prev != next)) {
4693 #ifdef CONFIG_SCHED_WALT
4694 		if (!prev->on_rq)
4695 			prev->last_sleep_ts = wallclock;
4696 #endif
4697 		update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
4698 		update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
4699 		rq->nr_switches++;
4700 		/*
4701 		 * RCU users of rcu_dereference(rq->curr) may not see
4702 		 * changes to task_struct made by pick_next_task().
4703 		 */
4704 		RCU_INIT_POINTER(rq->curr, next);
4705 		/*
4706 		 * The membarrier system call requires each architecture
4707 		 * to have a full memory barrier after updating
4708 		 * rq->curr, before returning to user-space.
4709 		 *
4710 		 * Here are the schemes providing that barrier on the
4711 		 * various architectures:
4712 		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
4713 		 *   switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
4714 		 * - finish_lock_switch() for weakly-ordered
4715 		 *   architectures where spin_unlock is a full barrier,
4716 		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
4717 		 *   is a RELEASE barrier),
4718 		 */
4719 		++*switch_count;
4720 
4721 		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
4722 
4723 		trace_sched_switch(preempt, prev, next);
4724 
4725 		/* Also unlocks the rq: */
4726 		rq = context_switch(rq, prev, next, &rf);
4727 	} else {
4728 		update_task_ravg(prev, rq, TASK_UPDATE, wallclock, 0);
4729 		rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
4730 		rq_unlock_irq(rq, &rf);
4731 	}
4732 
4733 	balance_callback(rq);
4734 }
4735 
do_task_dead(void)4736 void __noreturn do_task_dead(void)
4737 {
4738 	/* Causes final put_task_struct in finish_task_switch(): */
4739 	set_special_state(TASK_DEAD);
4740 
4741 	/* Tell freezer to ignore us: */
4742 	current->flags |= PF_NOFREEZE;
4743 
4744 	__schedule(false);
4745 	BUG();
4746 
4747 	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
4748 	for (;;)
4749 		cpu_relax();
4750 }
4751 
sched_submit_work(struct task_struct * tsk)4752 static inline void sched_submit_work(struct task_struct *tsk)
4753 {
4754 	unsigned int task_flags;
4755 
4756 	if (!tsk->state)
4757 		return;
4758 
4759 	task_flags = tsk->flags;
4760 	/*
4761 	 * If a worker went to sleep, notify and ask workqueue whether
4762 	 * it wants to wake up a task to maintain concurrency.
4763 	 * As this function is called inside the schedule() context,
4764 	 * we disable preemption to avoid it calling schedule() again
4765 	 * in the possible wakeup of a kworker and because wq_worker_sleeping()
4766 	 * requires it.
4767 	 */
4768 	if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
4769 		preempt_disable();
4770 		if (task_flags & PF_WQ_WORKER)
4771 			wq_worker_sleeping(tsk);
4772 		else
4773 			io_wq_worker_sleeping(tsk);
4774 		preempt_enable_no_resched();
4775 	}
4776 
4777 	if (tsk_is_pi_blocked(tsk))
4778 		return;
4779 
4780 	/*
4781 	 * If we are going to sleep and we have plugged IO queued,
4782 	 * make sure to submit it to avoid deadlocks.
4783 	 */
4784 	if (blk_needs_flush_plug(tsk))
4785 		blk_schedule_flush_plug(tsk);
4786 }
4787 
sched_update_worker(struct task_struct * tsk)4788 static void sched_update_worker(struct task_struct *tsk)
4789 {
4790 	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
4791 		if (tsk->flags & PF_WQ_WORKER)
4792 			wq_worker_running(tsk);
4793 		else
4794 			io_wq_worker_running(tsk);
4795 	}
4796 }
4797 
schedule(void)4798 asmlinkage __visible void __sched schedule(void)
4799 {
4800 	struct task_struct *tsk = current;
4801 
4802 	sched_submit_work(tsk);
4803 	do {
4804 		preempt_disable();
4805 		__schedule(false);
4806 		sched_preempt_enable_no_resched();
4807 	} while (need_resched());
4808 	sched_update_worker(tsk);
4809 }
4810 EXPORT_SYMBOL(schedule);
4811 
4812 /*
4813  * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
4814  * state (have scheduled out non-voluntarily) by making sure that all
4815  * tasks have either left the run queue or have gone into user space.
4816  * As idle tasks do not do either, they must not ever be preempted
4817  * (schedule out non-voluntarily).
4818  *
4819  * schedule_idle() is similar to schedule_preempt_disable() except that it
4820  * never enables preemption because it does not call sched_submit_work().
4821  */
schedule_idle(void)4822 void __sched schedule_idle(void)
4823 {
4824 	/*
4825 	 * As this skips calling sched_submit_work(), which the idle task does
4826 	 * regardless because that function is a nop when the task is in a
4827 	 * TASK_RUNNING state, make sure this isn't used someplace that the
4828 	 * current task can be in any other state. Note, idle is always in the
4829 	 * TASK_RUNNING state.
4830 	 */
4831 	WARN_ON_ONCE(current->state);
4832 	do {
4833 		__schedule(false);
4834 	} while (need_resched());
4835 }
4836 
4837 #ifdef CONFIG_CONTEXT_TRACKING
schedule_user(void)4838 asmlinkage __visible void __sched schedule_user(void)
4839 {
4840 	/*
4841 	 * If we come here after a random call to set_need_resched(),
4842 	 * or we have been woken up remotely but the IPI has not yet arrived,
4843 	 * we haven't yet exited the RCU idle mode. Do it here manually until
4844 	 * we find a better solution.
4845 	 *
4846 	 * NB: There are buggy callers of this function.  Ideally we
4847 	 * should warn if prev_state != CONTEXT_USER, but that will trigger
4848 	 * too frequently to make sense yet.
4849 	 */
4850 	enum ctx_state prev_state = exception_enter();
4851 	schedule();
4852 	exception_exit(prev_state);
4853 }
4854 #endif
4855 
4856 /**
4857  * schedule_preempt_disabled - called with preemption disabled
4858  *
4859  * Returns with preemption disabled. Note: preempt_count must be 1
4860  */
schedule_preempt_disabled(void)4861 void __sched schedule_preempt_disabled(void)
4862 {
4863 	sched_preempt_enable_no_resched();
4864 	schedule();
4865 	preempt_disable();
4866 }
4867 
preempt_schedule_common(void)4868 static void __sched notrace preempt_schedule_common(void)
4869 {
4870 	do {
4871 		/*
4872 		 * Because the function tracer can trace preempt_count_sub()
4873 		 * and it also uses preempt_enable/disable_notrace(), if
4874 		 * NEED_RESCHED is set, the preempt_enable_notrace() called
4875 		 * by the function tracer will call this function again and
4876 		 * cause infinite recursion.
4877 		 *
4878 		 * Preemption must be disabled here before the function
4879 		 * tracer can trace. Break up preempt_disable() into two
4880 		 * calls. One to disable preemption without fear of being
4881 		 * traced. The other to still record the preemption latency,
4882 		 * which can also be traced by the function tracer.
4883 		 */
4884 		preempt_disable_notrace();
4885 		preempt_latency_start(1);
4886 		__schedule(true);
4887 		preempt_latency_stop(1);
4888 		preempt_enable_no_resched_notrace();
4889 
4890 		/*
4891 		 * Check again in case we missed a preemption opportunity
4892 		 * between schedule and now.
4893 		 */
4894 	} while (need_resched());
4895 }
4896 
4897 #ifdef CONFIG_PREEMPTION
4898 /*
4899  * This is the entry point to schedule() from in-kernel preemption
4900  * off of preempt_enable.
4901  */
preempt_schedule(void)4902 asmlinkage __visible void __sched notrace preempt_schedule(void)
4903 {
4904 	/*
4905 	 * If there is a non-zero preempt_count or interrupts are disabled,
4906 	 * we do not want to preempt the current task. Just return..
4907 	 */
4908 	if (likely(!preemptible()))
4909 		return;
4910 
4911 	preempt_schedule_common();
4912 }
4913 NOKPROBE_SYMBOL(preempt_schedule);
4914 EXPORT_SYMBOL(preempt_schedule);
4915 
4916 /**
4917  * preempt_schedule_notrace - preempt_schedule called by tracing
4918  *
4919  * The tracing infrastructure uses preempt_enable_notrace to prevent
4920  * recursion and tracing preempt enabling caused by the tracing
4921  * infrastructure itself. But as tracing can happen in areas coming
4922  * from userspace or just about to enter userspace, a preempt enable
4923  * can occur before user_exit() is called. This will cause the scheduler
4924  * to be called when the system is still in usermode.
4925  *
4926  * To prevent this, the preempt_enable_notrace will use this function
4927  * instead of preempt_schedule() to exit user context if needed before
4928  * calling the scheduler.
4929  */
preempt_schedule_notrace(void)4930 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
4931 {
4932 	enum ctx_state prev_ctx;
4933 
4934 	if (likely(!preemptible()))
4935 		return;
4936 
4937 	do {
4938 		/*
4939 		 * Because the function tracer can trace preempt_count_sub()
4940 		 * and it also uses preempt_enable/disable_notrace(), if
4941 		 * NEED_RESCHED is set, the preempt_enable_notrace() called
4942 		 * by the function tracer will call this function again and
4943 		 * cause infinite recursion.
4944 		 *
4945 		 * Preemption must be disabled here before the function
4946 		 * tracer can trace. Break up preempt_disable() into two
4947 		 * calls. One to disable preemption without fear of being
4948 		 * traced. The other to still record the preemption latency,
4949 		 * which can also be traced by the function tracer.
4950 		 */
4951 		preempt_disable_notrace();
4952 		preempt_latency_start(1);
4953 		/*
4954 		 * Needs preempt disabled in case user_exit() is traced
4955 		 * and the tracer calls preempt_enable_notrace() causing
4956 		 * an infinite recursion.
4957 		 */
4958 		prev_ctx = exception_enter();
4959 		__schedule(true);
4960 		exception_exit(prev_ctx);
4961 
4962 		preempt_latency_stop(1);
4963 		preempt_enable_no_resched_notrace();
4964 	} while (need_resched());
4965 }
4966 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
4967 
4968 #endif /* CONFIG_PREEMPTION */
4969 
4970 /*
4971  * This is the entry point to schedule() from kernel preemption
4972  * off of irq context.
4973  * Note, that this is called and return with irqs disabled. This will
4974  * protect us against recursive calling from irq.
4975  */
preempt_schedule_irq(void)4976 asmlinkage __visible void __sched preempt_schedule_irq(void)
4977 {
4978 	enum ctx_state prev_state;
4979 
4980 	/* Catch callers which need to be fixed */
4981 	BUG_ON(preempt_count() || !irqs_disabled());
4982 
4983 	prev_state = exception_enter();
4984 
4985 	do {
4986 		preempt_disable();
4987 		local_irq_enable();
4988 		__schedule(true);
4989 		local_irq_disable();
4990 		sched_preempt_enable_no_resched();
4991 	} while (need_resched());
4992 
4993 	exception_exit(prev_state);
4994 }
4995 
default_wake_function(wait_queue_entry_t * curr,unsigned mode,int wake_flags,void * key)4996 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
4997 			  void *key)
4998 {
4999 	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
5000 	return try_to_wake_up(curr->private, mode, wake_flags);
5001 }
5002 EXPORT_SYMBOL(default_wake_function);
5003 
__setscheduler_prio(struct task_struct * p,int prio)5004 static void __setscheduler_prio(struct task_struct *p, int prio)
5005 {
5006 	if (dl_prio(prio))
5007 		p->sched_class = &dl_sched_class;
5008 	else if (rt_prio(prio))
5009 		p->sched_class = &rt_sched_class;
5010 	else
5011 		p->sched_class = &fair_sched_class;
5012 
5013 	p->prio = prio;
5014 }
5015 
5016 #ifdef CONFIG_RT_MUTEXES
5017 
__rt_effective_prio(struct task_struct * pi_task,int prio)5018 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
5019 {
5020 	if (pi_task)
5021 		prio = min(prio, pi_task->prio);
5022 
5023 	return prio;
5024 }
5025 
rt_effective_prio(struct task_struct * p,int prio)5026 static inline int rt_effective_prio(struct task_struct *p, int prio)
5027 {
5028 	struct task_struct *pi_task = rt_mutex_get_top_task(p);
5029 
5030 	return __rt_effective_prio(pi_task, prio);
5031 }
5032 
5033 /*
5034  * rt_mutex_setprio - set the current priority of a task
5035  * @p: task to boost
5036  * @pi_task: donor task
5037  *
5038  * This function changes the 'effective' priority of a task. It does
5039  * not touch ->normal_prio like __setscheduler().
5040  *
5041  * Used by the rt_mutex code to implement priority inheritance
5042  * logic. Call site only calls if the priority of the task changed.
5043  */
rt_mutex_setprio(struct task_struct * p,struct task_struct * pi_task)5044 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
5045 {
5046 	int prio, oldprio, queued, running, queue_flag =
5047 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
5048 	const struct sched_class *prev_class;
5049 	struct rq_flags rf;
5050 	struct rq *rq;
5051 
5052 	/* XXX used to be waiter->prio, not waiter->task->prio */
5053 	prio = __rt_effective_prio(pi_task, p->normal_prio);
5054 
5055 	/*
5056 	 * If nothing changed; bail early.
5057 	 */
5058 	if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
5059 		return;
5060 
5061 	rq = __task_rq_lock(p, &rf);
5062 	update_rq_clock(rq);
5063 	/*
5064 	 * Set under pi_lock && rq->lock, such that the value can be used under
5065 	 * either lock.
5066 	 *
5067 	 * Note that there is loads of tricky to make this pointer cache work
5068 	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
5069 	 * ensure a task is de-boosted (pi_task is set to NULL) before the
5070 	 * task is allowed to run again (and can exit). This ensures the pointer
5071 	 * points to a blocked task -- which guaratees the task is present.
5072 	 */
5073 	p->pi_top_task = pi_task;
5074 
5075 	/*
5076 	 * For FIFO/RR we only need to set prio, if that matches we're done.
5077 	 */
5078 	if (prio == p->prio && !dl_prio(prio))
5079 		goto out_unlock;
5080 
5081 	/*
5082 	 * Idle task boosting is a nono in general. There is one
5083 	 * exception, when PREEMPT_RT and NOHZ is active:
5084 	 *
5085 	 * The idle task calls get_next_timer_interrupt() and holds
5086 	 * the timer wheel base->lock on the CPU and another CPU wants
5087 	 * to access the timer (probably to cancel it). We can safely
5088 	 * ignore the boosting request, as the idle CPU runs this code
5089 	 * with interrupts disabled and will complete the lock
5090 	 * protected section without being interrupted. So there is no
5091 	 * real need to boost.
5092 	 */
5093 	if (unlikely(p == rq->idle)) {
5094 		WARN_ON(p != rq->curr);
5095 		WARN_ON(p->pi_blocked_on);
5096 		goto out_unlock;
5097 	}
5098 
5099 	trace_sched_pi_setprio(p, pi_task);
5100 	oldprio = p->prio;
5101 
5102 	if (oldprio == prio)
5103 		queue_flag &= ~DEQUEUE_MOVE;
5104 
5105 	prev_class = p->sched_class;
5106 	queued = task_on_rq_queued(p);
5107 	running = task_current(rq, p);
5108 	if (queued)
5109 		dequeue_task(rq, p, queue_flag);
5110 	if (running)
5111 		put_prev_task(rq, p);
5112 
5113 	/*
5114 	 * Boosting condition are:
5115 	 * 1. -rt task is running and holds mutex A
5116 	 *      --> -dl task blocks on mutex A
5117 	 *
5118 	 * 2. -dl task is running and holds mutex A
5119 	 *      --> -dl task blocks on mutex A and could preempt the
5120 	 *          running task
5121 	 */
5122 	if (dl_prio(prio)) {
5123 		if (!dl_prio(p->normal_prio) ||
5124 		    (pi_task && dl_prio(pi_task->prio) &&
5125 		     dl_entity_preempt(&pi_task->dl, &p->dl))) {
5126 			p->dl.pi_se = pi_task->dl.pi_se;
5127 			queue_flag |= ENQUEUE_REPLENISH;
5128 		} else {
5129 			p->dl.pi_se = &p->dl;
5130 		}
5131 	} else if (rt_prio(prio)) {
5132 		if (dl_prio(oldprio))
5133 			p->dl.pi_se = &p->dl;
5134 		if (oldprio < prio)
5135 			queue_flag |= ENQUEUE_HEAD;
5136 	} else {
5137 		if (dl_prio(oldprio))
5138 			p->dl.pi_se = &p->dl;
5139 		if (rt_prio(oldprio))
5140 			p->rt.timeout = 0;
5141 	}
5142 
5143 	__setscheduler_prio(p, prio);
5144 
5145 	if (queued)
5146 		enqueue_task(rq, p, queue_flag);
5147 	if (running)
5148 		set_next_task(rq, p);
5149 
5150 	check_class_changed(rq, p, prev_class, oldprio);
5151 out_unlock:
5152 	/* Avoid rq from going away on us: */
5153 	preempt_disable();
5154 	__task_rq_unlock(rq, &rf);
5155 
5156 	balance_callback(rq);
5157 	preempt_enable();
5158 }
5159 #else
rt_effective_prio(struct task_struct * p,int prio)5160 static inline int rt_effective_prio(struct task_struct *p, int prio)
5161 {
5162 	return prio;
5163 }
5164 #endif
5165 
set_user_nice(struct task_struct * p,long nice)5166 void set_user_nice(struct task_struct *p, long nice)
5167 {
5168 	bool queued, running;
5169 	int old_prio;
5170 	struct rq_flags rf;
5171 	struct rq *rq;
5172 
5173 	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
5174 		return;
5175 	/*
5176 	 * We have to be careful, if called from sys_setpriority(),
5177 	 * the task might be in the middle of scheduling on another CPU.
5178 	 */
5179 	rq = task_rq_lock(p, &rf);
5180 	update_rq_clock(rq);
5181 
5182 	/*
5183 	 * The RT priorities are set via sched_setscheduler(), but we still
5184 	 * allow the 'normal' nice value to be set - but as expected
5185 	 * it wont have any effect on scheduling until the task is
5186 	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
5187 	 */
5188 	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
5189 		p->static_prio = NICE_TO_PRIO(nice);
5190 		goto out_unlock;
5191 	}
5192 	queued = task_on_rq_queued(p);
5193 	running = task_current(rq, p);
5194 	if (queued)
5195 		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
5196 	if (running)
5197 		put_prev_task(rq, p);
5198 
5199 	p->static_prio = NICE_TO_PRIO(nice);
5200 	set_load_weight(p, true);
5201 	old_prio = p->prio;
5202 	p->prio = effective_prio(p);
5203 
5204 	if (queued)
5205 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
5206 	if (running)
5207 		set_next_task(rq, p);
5208 
5209 	/*
5210 	 * If the task increased its priority or is running and
5211 	 * lowered its priority, then reschedule its CPU:
5212 	 */
5213 	p->sched_class->prio_changed(rq, p, old_prio);
5214 
5215 out_unlock:
5216 	task_rq_unlock(rq, p, &rf);
5217 }
5218 EXPORT_SYMBOL(set_user_nice);
5219 
5220 /*
5221  * can_nice - check if a task can reduce its nice value
5222  * @p: task
5223  * @nice: nice value
5224  */
can_nice(const struct task_struct * p,const int nice)5225 int can_nice(const struct task_struct *p, const int nice)
5226 {
5227 	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
5228 	int nice_rlim = nice_to_rlimit(nice);
5229 
5230 	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
5231 		capable(CAP_SYS_NICE));
5232 }
5233 
5234 #ifdef __ARCH_WANT_SYS_NICE
5235 
5236 /*
5237  * sys_nice - change the priority of the current process.
5238  * @increment: priority increment
5239  *
5240  * sys_setpriority is a more generic, but much slower function that
5241  * does similar things.
5242  */
SYSCALL_DEFINE1(nice,int,increment)5243 SYSCALL_DEFINE1(nice, int, increment)
5244 {
5245 	long nice, retval;
5246 
5247 	/*
5248 	 * Setpriority might change our priority at the same moment.
5249 	 * We don't have to worry. Conceptually one call occurs first
5250 	 * and we have a single winner.
5251 	 */
5252 	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
5253 	nice = task_nice(current) + increment;
5254 
5255 	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
5256 	if (increment < 0 && !can_nice(current, nice))
5257 		return -EPERM;
5258 
5259 	retval = security_task_setnice(current, nice);
5260 	if (retval)
5261 		return retval;
5262 
5263 	set_user_nice(current, nice);
5264 	return 0;
5265 }
5266 
5267 #endif
5268 
5269 /**
5270  * task_prio - return the priority value of a given task.
5271  * @p: the task in question.
5272  *
5273  * Return: The priority value as seen by users in /proc.
5274  * RT tasks are offset by -200. Normal tasks are centered
5275  * around 0, value goes from -16 to +15.
5276  */
task_prio(const struct task_struct * p)5277 int task_prio(const struct task_struct *p)
5278 {
5279 	return p->prio - MAX_RT_PRIO;
5280 }
5281 
5282 /**
5283  * idle_cpu - is a given CPU idle currently?
5284  * @cpu: the processor in question.
5285  *
5286  * Return: 1 if the CPU is currently idle. 0 otherwise.
5287  */
idle_cpu(int cpu)5288 int idle_cpu(int cpu)
5289 {
5290 	struct rq *rq = cpu_rq(cpu);
5291 
5292 	if (rq->curr != rq->idle)
5293 		return 0;
5294 
5295 	if (rq->nr_running)
5296 		return 0;
5297 
5298 #ifdef CONFIG_SMP
5299 	if (rq->ttwu_pending)
5300 		return 0;
5301 #endif
5302 
5303 	return 1;
5304 }
5305 
5306 /**
5307  * available_idle_cpu - is a given CPU idle for enqueuing work.
5308  * @cpu: the CPU in question.
5309  *
5310  * Return: 1 if the CPU is currently idle. 0 otherwise.
5311  */
available_idle_cpu(int cpu)5312 int available_idle_cpu(int cpu)
5313 {
5314 	if (!idle_cpu(cpu))
5315 		return 0;
5316 
5317 	if (vcpu_is_preempted(cpu))
5318 		return 0;
5319 
5320 	return 1;
5321 }
5322 
5323 /**
5324  * idle_task - return the idle task for a given CPU.
5325  * @cpu: the processor in question.
5326  *
5327  * Return: The idle task for the CPU @cpu.
5328  */
idle_task(int cpu)5329 struct task_struct *idle_task(int cpu)
5330 {
5331 	return cpu_rq(cpu)->idle;
5332 }
5333 
5334 /**
5335  * find_process_by_pid - find a process with a matching PID value.
5336  * @pid: the pid in question.
5337  *
5338  * The task of @pid, if found. %NULL otherwise.
5339  */
find_process_by_pid(pid_t pid)5340 static struct task_struct *find_process_by_pid(pid_t pid)
5341 {
5342 	return pid ? find_task_by_vpid(pid) : current;
5343 }
5344 
5345 /*
5346  * sched_setparam() passes in -1 for its policy, to let the functions
5347  * it calls know not to change it.
5348  */
5349 #define SETPARAM_POLICY	-1
5350 
__setscheduler_params(struct task_struct * p,const struct sched_attr * attr)5351 static void __setscheduler_params(struct task_struct *p,
5352 		const struct sched_attr *attr)
5353 {
5354 	int policy = attr->sched_policy;
5355 
5356 	if (policy == SETPARAM_POLICY)
5357 		policy = p->policy;
5358 
5359 	p->policy = policy;
5360 
5361 	if (dl_policy(policy))
5362 		__setparam_dl(p, attr);
5363 	else if (fair_policy(policy))
5364 		p->static_prio = NICE_TO_PRIO(attr->sched_nice);
5365 
5366 	/*
5367 	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
5368 	 * !rt_policy. Always setting this ensures that things like
5369 	 * getparam()/getattr() don't report silly values for !rt tasks.
5370 	 */
5371 	p->rt_priority = attr->sched_priority;
5372 	p->normal_prio = normal_prio(p);
5373 	set_load_weight(p, true);
5374 }
5375 
5376 /*
5377  * Check the target process has a UID that matches the current process's:
5378  */
check_same_owner(struct task_struct * p)5379 static bool check_same_owner(struct task_struct *p)
5380 {
5381 	const struct cred *cred = current_cred(), *pcred;
5382 	bool match;
5383 
5384 	rcu_read_lock();
5385 	pcred = __task_cred(p);
5386 	match = (uid_eq(cred->euid, pcred->euid) ||
5387 		 uid_eq(cred->euid, pcred->uid));
5388 	rcu_read_unlock();
5389 	return match;
5390 }
5391 
__sched_setscheduler(struct task_struct * p,const struct sched_attr * attr,bool user,bool pi)5392 static int __sched_setscheduler(struct task_struct *p,
5393 				const struct sched_attr *attr,
5394 				bool user, bool pi)
5395 {
5396 	int oldpolicy = -1, policy = attr->sched_policy;
5397 	int retval, oldprio, newprio, queued, running;
5398 	const struct sched_class *prev_class;
5399 	struct rq_flags rf;
5400 	int reset_on_fork;
5401 	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
5402 	struct rq *rq;
5403 
5404 	/* The pi code expects interrupts enabled */
5405 	BUG_ON(pi && in_interrupt());
5406 recheck:
5407 	/* Double check policy once rq lock held: */
5408 	if (policy < 0) {
5409 		reset_on_fork = p->sched_reset_on_fork;
5410 		policy = oldpolicy = p->policy;
5411 	} else {
5412 		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
5413 
5414 		if (!valid_policy(policy))
5415 			return -EINVAL;
5416 	}
5417 
5418 	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
5419 		return -EINVAL;
5420 
5421 	/*
5422 	 * Valid priorities for SCHED_FIFO and SCHED_RR are
5423 	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
5424 	 * SCHED_BATCH and SCHED_IDLE is 0.
5425 	 */
5426 	if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
5427 	    (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
5428 		return -EINVAL;
5429 	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
5430 	    (rt_policy(policy) != (attr->sched_priority != 0)))
5431 		return -EINVAL;
5432 
5433 	/*
5434 	 * Allow unprivileged RT tasks to decrease priority:
5435 	 */
5436 	if (user && !capable(CAP_SYS_NICE)) {
5437 		if (fair_policy(policy)) {
5438 			if (attr->sched_nice < task_nice(p) &&
5439 			    !can_nice(p, attr->sched_nice))
5440 				return -EPERM;
5441 		}
5442 
5443 		if (rt_policy(policy)) {
5444 			unsigned long rlim_rtprio =
5445 					task_rlimit(p, RLIMIT_RTPRIO);
5446 
5447 			/* Can't set/change the rt policy: */
5448 			if (policy != p->policy && !rlim_rtprio)
5449 				return -EPERM;
5450 
5451 			/* Can't increase priority: */
5452 			if (attr->sched_priority > p->rt_priority &&
5453 			    attr->sched_priority > rlim_rtprio)
5454 				return -EPERM;
5455 		}
5456 
5457 		 /*
5458 		  * Can't set/change SCHED_DEADLINE policy at all for now
5459 		  * (safest behavior); in the future we would like to allow
5460 		  * unprivileged DL tasks to increase their relative deadline
5461 		  * or reduce their runtime (both ways reducing utilization)
5462 		  */
5463 		if (dl_policy(policy))
5464 			return -EPERM;
5465 
5466 		/*
5467 		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
5468 		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
5469 		 */
5470 		if (task_has_idle_policy(p) && !idle_policy(policy)) {
5471 			if (!can_nice(p, task_nice(p)))
5472 				return -EPERM;
5473 		}
5474 
5475 		/* Can't change other user's priorities: */
5476 		if (!check_same_owner(p))
5477 			return -EPERM;
5478 
5479 		/* Normal users shall not reset the sched_reset_on_fork flag: */
5480 		if (p->sched_reset_on_fork && !reset_on_fork)
5481 			return -EPERM;
5482 	}
5483 
5484 	if (user) {
5485 		if (attr->sched_flags & SCHED_FLAG_SUGOV)
5486 			return -EINVAL;
5487 
5488 		retval = security_task_setscheduler(p);
5489 		if (retval)
5490 			return retval;
5491 	}
5492 
5493 	/* Update task specific "requested" clamps */
5494 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
5495 		retval = uclamp_validate(p, attr);
5496 		if (retval)
5497 			return retval;
5498 	}
5499 
5500 	if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE) {
5501 		retval = latency_nice_validate(p, user, attr);
5502 		if (retval)
5503 			return retval;
5504 	}
5505 
5506 	if (pi)
5507 		cpuset_read_lock();
5508 
5509 	/*
5510 	 * Make sure no PI-waiters arrive (or leave) while we are
5511 	 * changing the priority of the task:
5512 	 *
5513 	 * To be able to change p->policy safely, the appropriate
5514 	 * runqueue lock must be held.
5515 	 */
5516 	rq = task_rq_lock(p, &rf);
5517 	update_rq_clock(rq);
5518 
5519 	/*
5520 	 * Changing the policy of the stop threads its a very bad idea:
5521 	 */
5522 	if (p == rq->stop) {
5523 		retval = -EINVAL;
5524 		goto unlock;
5525 	}
5526 
5527 	/*
5528 	 * If not changing anything there's no need to proceed further,
5529 	 * but store a possible modification of reset_on_fork.
5530 	 */
5531 	if (unlikely(policy == p->policy)) {
5532 		if (fair_policy(policy) && attr->sched_nice != task_nice(p))
5533 			goto change;
5534 		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
5535 			goto change;
5536 		if (dl_policy(policy) && dl_param_changed(p, attr))
5537 			goto change;
5538 		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
5539 			goto change;
5540 #ifdef CONFIG_SCHED_LATENCY_NICE
5541 		if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE &&
5542 		    attr->sched_latency_nice != LATENCY_TO_NICE(p->latency_prio))
5543 			goto change;
5544 #endif
5545 
5546 		p->sched_reset_on_fork = reset_on_fork;
5547 		retval = 0;
5548 		goto unlock;
5549 	}
5550 change:
5551 
5552 	if (user) {
5553 #ifdef CONFIG_RT_GROUP_SCHED
5554 		/*
5555 		 * Do not allow realtime tasks into groups that have no runtime
5556 		 * assigned.
5557 		 */
5558 		if (rt_bandwidth_enabled() && rt_policy(policy) &&
5559 				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
5560 				!task_group_is_autogroup(task_group(p))) {
5561 			retval = -EPERM;
5562 			goto unlock;
5563 		}
5564 #endif
5565 #ifdef CONFIG_SMP
5566 		if (dl_bandwidth_enabled() && dl_policy(policy) &&
5567 				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
5568 			cpumask_t *span = rq->rd->span;
5569 
5570 			/*
5571 			 * Don't allow tasks with an affinity mask smaller than
5572 			 * the entire root_domain to become SCHED_DEADLINE. We
5573 			 * will also fail if there's no bandwidth available.
5574 			 */
5575 			if (!cpumask_subset(span, p->cpus_ptr) ||
5576 			    rq->rd->dl_bw.bw == 0) {
5577 				retval = -EPERM;
5578 				goto unlock;
5579 			}
5580 		}
5581 #endif
5582 	}
5583 
5584 	/* Re-check policy now with rq lock held: */
5585 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
5586 		policy = oldpolicy = -1;
5587 		task_rq_unlock(rq, p, &rf);
5588 		if (pi)
5589 			cpuset_read_unlock();
5590 		goto recheck;
5591 	}
5592 
5593 	/*
5594 	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
5595 	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
5596 	 * is available.
5597 	 */
5598 	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
5599 		retval = -EBUSY;
5600 		goto unlock;
5601 	}
5602 
5603 	p->sched_reset_on_fork = reset_on_fork;
5604 	oldprio = p->prio;
5605 
5606 	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
5607 	if (pi) {
5608 		/*
5609 		 * Take priority boosted tasks into account. If the new
5610 		 * effective priority is unchanged, we just store the new
5611 		 * normal parameters and do not touch the scheduler class and
5612 		 * the runqueue. This will be done when the task deboost
5613 		 * itself.
5614 		 */
5615 		newprio = rt_effective_prio(p, newprio);
5616 		if (newprio == oldprio)
5617 			queue_flags &= ~DEQUEUE_MOVE;
5618 	}
5619 
5620 	queued = task_on_rq_queued(p);
5621 	running = task_current(rq, p);
5622 	if (queued)
5623 		dequeue_task(rq, p, queue_flags);
5624 	if (running)
5625 		put_prev_task(rq, p);
5626 
5627 	prev_class = p->sched_class;
5628 
5629 	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
5630 		__setscheduler_params(p, attr);
5631 		__setscheduler_prio(p, newprio);
5632 	}
5633 	__setscheduler_latency(p, attr);
5634 	__setscheduler_uclamp(p, attr);
5635 
5636 	if (queued) {
5637 		/*
5638 		 * We enqueue to tail when the priority of a task is
5639 		 * increased (user space view).
5640 		 */
5641 		if (oldprio < p->prio)
5642 			queue_flags |= ENQUEUE_HEAD;
5643 
5644 		enqueue_task(rq, p, queue_flags);
5645 	}
5646 	if (running)
5647 		set_next_task(rq, p);
5648 
5649 	check_class_changed(rq, p, prev_class, oldprio);
5650 
5651 	/* Avoid rq from going away on us: */
5652 	preempt_disable();
5653 	task_rq_unlock(rq, p, &rf);
5654 
5655 	if (pi) {
5656 		cpuset_read_unlock();
5657 		rt_mutex_adjust_pi(p);
5658 	}
5659 
5660 	/* Run balance callbacks after we've adjusted the PI chain: */
5661 	balance_callback(rq);
5662 	preempt_enable();
5663 
5664 	return 0;
5665 
5666 unlock:
5667 	task_rq_unlock(rq, p, &rf);
5668 	if (pi)
5669 		cpuset_read_unlock();
5670 	return retval;
5671 }
5672 
_sched_setscheduler(struct task_struct * p,int policy,const struct sched_param * param,bool check)5673 static int _sched_setscheduler(struct task_struct *p, int policy,
5674 			       const struct sched_param *param, bool check)
5675 {
5676 	struct sched_attr attr = {
5677 		.sched_policy   = policy,
5678 		.sched_priority = param->sched_priority,
5679 		.sched_nice	= PRIO_TO_NICE(p->static_prio),
5680 	};
5681 
5682 	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
5683 	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
5684 		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
5685 		policy &= ~SCHED_RESET_ON_FORK;
5686 		attr.sched_policy = policy;
5687 	}
5688 
5689 	return __sched_setscheduler(p, &attr, check, true);
5690 }
5691 /**
5692  * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
5693  * @p: the task in question.
5694  * @policy: new policy.
5695  * @param: structure containing the new RT priority.
5696  *
5697  * Use sched_set_fifo(), read its comment.
5698  *
5699  * Return: 0 on success. An error code otherwise.
5700  *
5701  * NOTE that the task may be already dead.
5702  */
sched_setscheduler(struct task_struct * p,int policy,const struct sched_param * param)5703 int sched_setscheduler(struct task_struct *p, int policy,
5704 		       const struct sched_param *param)
5705 {
5706 	return _sched_setscheduler(p, policy, param, true);
5707 }
5708 
sched_setattr(struct task_struct * p,const struct sched_attr * attr)5709 int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
5710 {
5711 	return __sched_setscheduler(p, attr, true, true);
5712 }
5713 
sched_setattr_nocheck(struct task_struct * p,const struct sched_attr * attr)5714 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
5715 {
5716 	return __sched_setscheduler(p, attr, false, true);
5717 }
5718 
5719 /**
5720  * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
5721  * @p: the task in question.
5722  * @policy: new policy.
5723  * @param: structure containing the new RT priority.
5724  *
5725  * Just like sched_setscheduler, only don't bother checking if the
5726  * current context has permission.  For example, this is needed in
5727  * stop_machine(): we create temporary high priority worker threads,
5728  * but our caller might not have that capability.
5729  *
5730  * Return: 0 on success. An error code otherwise.
5731  */
sched_setscheduler_nocheck(struct task_struct * p,int policy,const struct sched_param * param)5732 int sched_setscheduler_nocheck(struct task_struct *p, int policy,
5733 			       const struct sched_param *param)
5734 {
5735 	return _sched_setscheduler(p, policy, param, false);
5736 }
5737 
5738 /*
5739  * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
5740  * incapable of resource management, which is the one thing an OS really should
5741  * be doing.
5742  *
5743  * This is of course the reason it is limited to privileged users only.
5744  *
5745  * Worse still; it is fundamentally impossible to compose static priority
5746  * workloads. You cannot take two correctly working static prio workloads
5747  * and smash them together and still expect them to work.
5748  *
5749  * For this reason 'all' FIFO tasks the kernel creates are basically at:
5750  *
5751  *   MAX_RT_PRIO / 2
5752  *
5753  * The administrator _MUST_ configure the system, the kernel simply doesn't
5754  * know enough information to make a sensible choice.
5755  */
sched_set_fifo(struct task_struct * p)5756 void sched_set_fifo(struct task_struct *p)
5757 {
5758 	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
5759 	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
5760 }
5761 EXPORT_SYMBOL_GPL(sched_set_fifo);
5762 
5763 /*
5764  * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
5765  */
sched_set_fifo_low(struct task_struct * p)5766 void sched_set_fifo_low(struct task_struct *p)
5767 {
5768 	struct sched_param sp = { .sched_priority = 1 };
5769 	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
5770 }
5771 EXPORT_SYMBOL_GPL(sched_set_fifo_low);
5772 
sched_set_normal(struct task_struct * p,int nice)5773 void sched_set_normal(struct task_struct *p, int nice)
5774 {
5775 	struct sched_attr attr = {
5776 		.sched_policy = SCHED_NORMAL,
5777 		.sched_nice = nice,
5778 	};
5779 	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
5780 }
5781 EXPORT_SYMBOL_GPL(sched_set_normal);
5782 
5783 static int
do_sched_setscheduler(pid_t pid,int policy,struct sched_param __user * param)5784 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
5785 {
5786 	struct sched_param lparam;
5787 	struct task_struct *p;
5788 	int retval;
5789 
5790 	if (!param || pid < 0)
5791 		return -EINVAL;
5792 	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
5793 		return -EFAULT;
5794 
5795 	rcu_read_lock();
5796 	retval = -ESRCH;
5797 	p = find_process_by_pid(pid);
5798 	if (likely(p))
5799 		get_task_struct(p);
5800 	rcu_read_unlock();
5801 
5802 	if (likely(p)) {
5803 		retval = sched_setscheduler(p, policy, &lparam);
5804 		put_task_struct(p);
5805 	}
5806 
5807 	return retval;
5808 }
5809 
5810 /*
5811  * Mimics kernel/events/core.c perf_copy_attr().
5812  */
sched_copy_attr(struct sched_attr __user * uattr,struct sched_attr * attr)5813 static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
5814 {
5815 	u32 size;
5816 	int ret;
5817 
5818 	/* Zero the full structure, so that a short copy will be nice: */
5819 	memset(attr, 0, sizeof(*attr));
5820 
5821 	ret = get_user(size, &uattr->size);
5822 	if (ret)
5823 		return ret;
5824 
5825 	/* ABI compatibility quirk: */
5826 	if (!size)
5827 		size = SCHED_ATTR_SIZE_VER0;
5828 	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
5829 		goto err_size;
5830 
5831 	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
5832 	if (ret) {
5833 		if (ret == -E2BIG)
5834 			goto err_size;
5835 		return ret;
5836 	}
5837 
5838 	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
5839 	    size < SCHED_ATTR_SIZE_VER1)
5840 		return -EINVAL;
5841 
5842 #ifdef CONFIG_SCHED_LATENCY_NICE
5843 	if ((attr->sched_flags & SCHED_FLAG_LATENCY_NICE) &&
5844 	    size < SCHED_ATTR_SIZE_VER2)
5845 		return -EINVAL;
5846 #endif
5847 	/*
5848 	 * XXX: Do we want to be lenient like existing syscalls; or do we want
5849 	 * to be strict and return an error on out-of-bounds values?
5850 	 */
5851 	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
5852 
5853 	return 0;
5854 
5855 err_size:
5856 	put_user(sizeof(*attr), &uattr->size);
5857 	return -E2BIG;
5858 }
5859 
5860 /**
5861  * sys_sched_setscheduler - set/change the scheduler policy and RT priority
5862  * @pid: the pid in question.
5863  * @policy: new policy.
5864  * @param: structure containing the new RT priority.
5865  *
5866  * Return: 0 on success. An error code otherwise.
5867  */
SYSCALL_DEFINE3(sched_setscheduler,pid_t,pid,int,policy,struct sched_param __user *,param)5868 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
5869 {
5870 	if (policy < 0)
5871 		return -EINVAL;
5872 
5873 	return do_sched_setscheduler(pid, policy, param);
5874 }
5875 
5876 /**
5877  * sys_sched_setparam - set/change the RT priority of a thread
5878  * @pid: the pid in question.
5879  * @param: structure containing the new RT priority.
5880  *
5881  * Return: 0 on success. An error code otherwise.
5882  */
SYSCALL_DEFINE2(sched_setparam,pid_t,pid,struct sched_param __user *,param)5883 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
5884 {
5885 	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
5886 }
5887 
5888 /**
5889  * sys_sched_setattr - same as above, but with extended sched_attr
5890  * @pid: the pid in question.
5891  * @uattr: structure containing the extended parameters.
5892  * @flags: for future extension.
5893  */
SYSCALL_DEFINE3(sched_setattr,pid_t,pid,struct sched_attr __user *,uattr,unsigned int,flags)5894 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
5895 			       unsigned int, flags)
5896 {
5897 	struct sched_attr attr;
5898 	struct task_struct *p;
5899 	int retval;
5900 
5901 	if (!uattr || pid < 0 || flags)
5902 		return -EINVAL;
5903 
5904 	retval = sched_copy_attr(uattr, &attr);
5905 	if (retval)
5906 		return retval;
5907 
5908 	if ((int)attr.sched_policy < 0)
5909 		return -EINVAL;
5910 	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
5911 		attr.sched_policy = SETPARAM_POLICY;
5912 
5913 	rcu_read_lock();
5914 	retval = -ESRCH;
5915 	p = find_process_by_pid(pid);
5916 	if (likely(p))
5917 		get_task_struct(p);
5918 	rcu_read_unlock();
5919 
5920 	if (likely(p)) {
5921 		retval = sched_setattr(p, &attr);
5922 		put_task_struct(p);
5923 	}
5924 
5925 	return retval;
5926 }
5927 
5928 /**
5929  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
5930  * @pid: the pid in question.
5931  *
5932  * Return: On success, the policy of the thread. Otherwise, a negative error
5933  * code.
5934  */
SYSCALL_DEFINE1(sched_getscheduler,pid_t,pid)5935 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
5936 {
5937 	struct task_struct *p;
5938 	int retval;
5939 
5940 	if (pid < 0)
5941 		return -EINVAL;
5942 
5943 	retval = -ESRCH;
5944 	rcu_read_lock();
5945 	p = find_process_by_pid(pid);
5946 	if (p) {
5947 		retval = security_task_getscheduler(p);
5948 		if (!retval)
5949 			retval = p->policy
5950 				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
5951 	}
5952 	rcu_read_unlock();
5953 	return retval;
5954 }
5955 
5956 /**
5957  * sys_sched_getparam - get the RT priority of a thread
5958  * @pid: the pid in question.
5959  * @param: structure containing the RT priority.
5960  *
5961  * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
5962  * code.
5963  */
SYSCALL_DEFINE2(sched_getparam,pid_t,pid,struct sched_param __user *,param)5964 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
5965 {
5966 	struct sched_param lp = { .sched_priority = 0 };
5967 	struct task_struct *p;
5968 	int retval;
5969 
5970 	if (!param || pid < 0)
5971 		return -EINVAL;
5972 
5973 	rcu_read_lock();
5974 	p = find_process_by_pid(pid);
5975 	retval = -ESRCH;
5976 	if (!p)
5977 		goto out_unlock;
5978 
5979 	retval = security_task_getscheduler(p);
5980 	if (retval)
5981 		goto out_unlock;
5982 
5983 	if (task_has_rt_policy(p))
5984 		lp.sched_priority = p->rt_priority;
5985 	rcu_read_unlock();
5986 
5987 	/*
5988 	 * This one might sleep, we cannot do it with a spinlock held ...
5989 	 */
5990 	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
5991 
5992 	return retval;
5993 
5994 out_unlock:
5995 	rcu_read_unlock();
5996 	return retval;
5997 }
5998 
5999 /*
6000  * Copy the kernel size attribute structure (which might be larger
6001  * than what user-space knows about) to user-space.
6002  *
6003  * Note that all cases are valid: user-space buffer can be larger or
6004  * smaller than the kernel-space buffer. The usual case is that both
6005  * have the same size.
6006  */
6007 static int
sched_attr_copy_to_user(struct sched_attr __user * uattr,struct sched_attr * kattr,unsigned int usize)6008 sched_attr_copy_to_user(struct sched_attr __user *uattr,
6009 			struct sched_attr *kattr,
6010 			unsigned int usize)
6011 {
6012 	unsigned int ksize = sizeof(*kattr);
6013 
6014 	if (!access_ok(uattr, usize))
6015 		return -EFAULT;
6016 
6017 	/*
6018 	 * sched_getattr() ABI forwards and backwards compatibility:
6019 	 *
6020 	 * If usize == ksize then we just copy everything to user-space and all is good.
6021 	 *
6022 	 * If usize < ksize then we only copy as much as user-space has space for,
6023 	 * this keeps ABI compatibility as well. We skip the rest.
6024 	 *
6025 	 * If usize > ksize then user-space is using a newer version of the ABI,
6026 	 * which part the kernel doesn't know about. Just ignore it - tooling can
6027 	 * detect the kernel's knowledge of attributes from the attr->size value
6028 	 * which is set to ksize in this case.
6029 	 */
6030 	kattr->size = min(usize, ksize);
6031 
6032 	if (copy_to_user(uattr, kattr, kattr->size))
6033 		return -EFAULT;
6034 
6035 	return 0;
6036 }
6037 
6038 /**
6039  * sys_sched_getattr - similar to sched_getparam, but with sched_attr
6040  * @pid: the pid in question.
6041  * @uattr: structure containing the extended parameters.
6042  * @usize: sizeof(attr) for fwd/bwd comp.
6043  * @flags: for future extension.
6044  */
SYSCALL_DEFINE4(sched_getattr,pid_t,pid,struct sched_attr __user *,uattr,unsigned int,usize,unsigned int,flags)6045 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
6046 		unsigned int, usize, unsigned int, flags)
6047 {
6048 	struct sched_attr kattr = { };
6049 	struct task_struct *p;
6050 	int retval;
6051 
6052 	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
6053 	    usize < SCHED_ATTR_SIZE_VER0 || flags)
6054 		return -EINVAL;
6055 
6056 	rcu_read_lock();
6057 	p = find_process_by_pid(pid);
6058 	retval = -ESRCH;
6059 	if (!p)
6060 		goto out_unlock;
6061 
6062 	retval = security_task_getscheduler(p);
6063 	if (retval)
6064 		goto out_unlock;
6065 
6066 	kattr.sched_policy = p->policy;
6067 	if (p->sched_reset_on_fork)
6068 		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
6069 	if (task_has_dl_policy(p))
6070 		__getparam_dl(p, &kattr);
6071 	else if (task_has_rt_policy(p))
6072 		kattr.sched_priority = p->rt_priority;
6073 	else
6074 		kattr.sched_nice = task_nice(p);
6075 
6076 #ifdef CONFIG_SCHED_LATENCY_NICE
6077 	kattr.sched_latency_nice = LATENCY_TO_NICE(p->latency_prio);
6078 #endif
6079 
6080 #ifdef CONFIG_UCLAMP_TASK
6081 	/*
6082 	 * This could race with another potential updater, but this is fine
6083 	 * because it'll correctly read the old or the new value. We don't need
6084 	 * to guarantee who wins the race as long as it doesn't return garbage.
6085 	 */
6086 	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
6087 	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
6088 #endif
6089 
6090 	rcu_read_unlock();
6091 
6092 	return sched_attr_copy_to_user(uattr, &kattr, usize);
6093 
6094 out_unlock:
6095 	rcu_read_unlock();
6096 	return retval;
6097 }
6098 
sched_setaffinity(pid_t pid,const struct cpumask * in_mask)6099 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
6100 {
6101 	cpumask_var_t cpus_allowed, new_mask;
6102 	struct task_struct *p;
6103 	int retval;
6104 #ifdef CONFIG_CPU_ISOLATION_OPT
6105 	int dest_cpu;
6106 	cpumask_t allowed_mask;
6107 #endif
6108 
6109 	rcu_read_lock();
6110 
6111 	p = find_process_by_pid(pid);
6112 	if (!p) {
6113 		rcu_read_unlock();
6114 		return -ESRCH;
6115 	}
6116 
6117 	/* Prevent p going away */
6118 	get_task_struct(p);
6119 	rcu_read_unlock();
6120 
6121 	if (p->flags & PF_NO_SETAFFINITY) {
6122 		retval = -EINVAL;
6123 		goto out_put_task;
6124 	}
6125 	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
6126 		retval = -ENOMEM;
6127 		goto out_put_task;
6128 	}
6129 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
6130 		retval = -ENOMEM;
6131 		goto out_free_cpus_allowed;
6132 	}
6133 	retval = -EPERM;
6134 	if (!check_same_owner(p)) {
6135 		rcu_read_lock();
6136 		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
6137 			rcu_read_unlock();
6138 			goto out_free_new_mask;
6139 		}
6140 		rcu_read_unlock();
6141 	}
6142 
6143 	retval = security_task_setscheduler(p);
6144 	if (retval)
6145 		goto out_free_new_mask;
6146 
6147 
6148 	cpuset_cpus_allowed(p, cpus_allowed);
6149 	cpumask_and(new_mask, in_mask, cpus_allowed);
6150 
6151 	/*
6152 	 * Since bandwidth control happens on root_domain basis,
6153 	 * if admission test is enabled, we only admit -deadline
6154 	 * tasks allowed to run on all the CPUs in the task's
6155 	 * root_domain.
6156 	 */
6157 #ifdef CONFIG_SMP
6158 	if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
6159 		rcu_read_lock();
6160 		if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
6161 			retval = -EBUSY;
6162 			rcu_read_unlock();
6163 			goto out_free_new_mask;
6164 		}
6165 		rcu_read_unlock();
6166 	}
6167 #endif
6168 again:
6169 #ifdef CONFIG_CPU_ISOLATION_OPT
6170 	cpumask_andnot(&allowed_mask, new_mask, cpu_isolated_mask);
6171 	dest_cpu = cpumask_any_and(cpu_active_mask, &allowed_mask);
6172 	if (dest_cpu < nr_cpu_ids) {
6173 #endif
6174 		retval = __set_cpus_allowed_ptr(p, new_mask, true);
6175 		if (!retval) {
6176 			cpuset_cpus_allowed(p, cpus_allowed);
6177 			if (!cpumask_subset(new_mask, cpus_allowed)) {
6178 				/*
6179 				 * We must have raced with a concurrent cpuset
6180 				 * update. Just reset the cpus_allowed to the
6181 				 * cpuset's cpus_allowed
6182 				 */
6183 				cpumask_copy(new_mask, cpus_allowed);
6184 				goto again;
6185 			}
6186 		}
6187 #ifdef CONFIG_CPU_ISOLATION_OPT
6188 	} else {
6189 		retval = -EINVAL;
6190 	}
6191 #endif
6192 
6193 out_free_new_mask:
6194 	free_cpumask_var(new_mask);
6195 out_free_cpus_allowed:
6196 	free_cpumask_var(cpus_allowed);
6197 out_put_task:
6198 	put_task_struct(p);
6199 	return retval;
6200 }
6201 
get_user_cpu_mask(unsigned long __user * user_mask_ptr,unsigned len,struct cpumask * new_mask)6202 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
6203 			     struct cpumask *new_mask)
6204 {
6205 	if (len < cpumask_size())
6206 		cpumask_clear(new_mask);
6207 	else if (len > cpumask_size())
6208 		len = cpumask_size();
6209 
6210 	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
6211 }
6212 
6213 /**
6214  * sys_sched_setaffinity - set the CPU affinity of a process
6215  * @pid: pid of the process
6216  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
6217  * @user_mask_ptr: user-space pointer to the new CPU mask
6218  *
6219  * Return: 0 on success. An error code otherwise.
6220  */
SYSCALL_DEFINE3(sched_setaffinity,pid_t,pid,unsigned int,len,unsigned long __user *,user_mask_ptr)6221 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
6222 		unsigned long __user *, user_mask_ptr)
6223 {
6224 	cpumask_var_t new_mask;
6225 	int retval;
6226 
6227 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
6228 		return -ENOMEM;
6229 
6230 	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
6231 	if (retval == 0)
6232 		retval = sched_setaffinity(pid, new_mask);
6233 	free_cpumask_var(new_mask);
6234 	return retval;
6235 }
6236 
sched_getaffinity(pid_t pid,struct cpumask * mask)6237 long sched_getaffinity(pid_t pid, struct cpumask *mask)
6238 {
6239 	struct task_struct *p;
6240 	unsigned long flags;
6241 	int retval;
6242 
6243 	rcu_read_lock();
6244 
6245 	retval = -ESRCH;
6246 	p = find_process_by_pid(pid);
6247 	if (!p)
6248 		goto out_unlock;
6249 
6250 	retval = security_task_getscheduler(p);
6251 	if (retval)
6252 		goto out_unlock;
6253 
6254 	raw_spin_lock_irqsave(&p->pi_lock, flags);
6255 	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
6256 
6257 #ifdef CONFIG_CPU_ISOLATION_OPT
6258 	/* The userspace tasks are forbidden to run on
6259 	 * isolated CPUs. So exclude isolated CPUs from
6260 	 * the getaffinity.
6261 	 */
6262 	if (!(p->flags & PF_KTHREAD))
6263 		cpumask_andnot(mask, mask, cpu_isolated_mask);
6264 #endif
6265 
6266 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
6267 
6268 out_unlock:
6269 	rcu_read_unlock();
6270 
6271 	return retval;
6272 }
6273 
6274 /**
6275  * sys_sched_getaffinity - get the CPU affinity of a process
6276  * @pid: pid of the process
6277  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
6278  * @user_mask_ptr: user-space pointer to hold the current CPU mask
6279  *
6280  * Return: size of CPU mask copied to user_mask_ptr on success. An
6281  * error code otherwise.
6282  */
SYSCALL_DEFINE3(sched_getaffinity,pid_t,pid,unsigned int,len,unsigned long __user *,user_mask_ptr)6283 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
6284 		unsigned long __user *, user_mask_ptr)
6285 {
6286 	int ret;
6287 	cpumask_var_t mask;
6288 
6289 	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
6290 		return -EINVAL;
6291 	if (len & (sizeof(unsigned long)-1))
6292 		return -EINVAL;
6293 
6294 	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
6295 		return -ENOMEM;
6296 
6297 	ret = sched_getaffinity(pid, mask);
6298 	if (ret == 0) {
6299 		unsigned int retlen = min(len, cpumask_size());
6300 
6301 		if (copy_to_user(user_mask_ptr, mask, retlen))
6302 			ret = -EFAULT;
6303 		else
6304 			ret = retlen;
6305 	}
6306 	free_cpumask_var(mask);
6307 
6308 	return ret;
6309 }
6310 
6311 /**
6312  * sys_sched_yield - yield the current processor to other threads.
6313  *
6314  * This function yields the current CPU to other tasks. If there are no
6315  * other threads running on this CPU then this function will return.
6316  *
6317  * Return: 0.
6318  */
do_sched_yield(void)6319 static void do_sched_yield(void)
6320 {
6321 	struct rq_flags rf;
6322 	struct rq *rq;
6323 
6324 	rq = this_rq_lock_irq(&rf);
6325 
6326 	schedstat_inc(rq->yld_count);
6327 	current->sched_class->yield_task(rq);
6328 
6329 	preempt_disable();
6330 	rq_unlock_irq(rq, &rf);
6331 	sched_preempt_enable_no_resched();
6332 
6333 	schedule();
6334 }
6335 
SYSCALL_DEFINE0(sched_yield)6336 SYSCALL_DEFINE0(sched_yield)
6337 {
6338 	do_sched_yield();
6339 	return 0;
6340 }
6341 
6342 #ifndef CONFIG_PREEMPTION
_cond_resched(void)6343 int __sched _cond_resched(void)
6344 {
6345 	if (should_resched(0)) {
6346 		preempt_schedule_common();
6347 		return 1;
6348 	}
6349 	rcu_all_qs();
6350 	return 0;
6351 }
6352 EXPORT_SYMBOL(_cond_resched);
6353 #endif
6354 
6355 /*
6356  * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
6357  * call schedule, and on return reacquire the lock.
6358  *
6359  * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
6360  * operations here to prevent schedule() from being called twice (once via
6361  * spin_unlock(), once by hand).
6362  */
__cond_resched_lock(spinlock_t * lock)6363 int __cond_resched_lock(spinlock_t *lock)
6364 {
6365 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
6366 	int ret = 0;
6367 
6368 	lockdep_assert_held(lock);
6369 
6370 	if (spin_needbreak(lock) || resched) {
6371 		spin_unlock(lock);
6372 		if (resched)
6373 			preempt_schedule_common();
6374 		else
6375 			cpu_relax();
6376 		ret = 1;
6377 		spin_lock(lock);
6378 	}
6379 	return ret;
6380 }
6381 EXPORT_SYMBOL(__cond_resched_lock);
6382 
6383 /**
6384  * yield - yield the current processor to other threads.
6385  *
6386  * Do not ever use this function, there's a 99% chance you're doing it wrong.
6387  *
6388  * The scheduler is at all times free to pick the calling task as the most
6389  * eligible task to run, if removing the yield() call from your code breaks
6390  * it, its already broken.
6391  *
6392  * Typical broken usage is:
6393  *
6394  * while (!event)
6395  *	yield();
6396  *
6397  * where one assumes that yield() will let 'the other' process run that will
6398  * make event true. If the current task is a SCHED_FIFO task that will never
6399  * happen. Never use yield() as a progress guarantee!!
6400  *
6401  * If you want to use yield() to wait for something, use wait_event().
6402  * If you want to use yield() to be 'nice' for others, use cond_resched().
6403  * If you still want to use yield(), do not!
6404  */
yield(void)6405 void __sched yield(void)
6406 {
6407 	set_current_state(TASK_RUNNING);
6408 	do_sched_yield();
6409 }
6410 EXPORT_SYMBOL(yield);
6411 
6412 /**
6413  * yield_to - yield the current processor to another thread in
6414  * your thread group, or accelerate that thread toward the
6415  * processor it's on.
6416  * @p: target task
6417  * @preempt: whether task preemption is allowed or not
6418  *
6419  * It's the caller's job to ensure that the target task struct
6420  * can't go away on us before we can do any checks.
6421  *
6422  * Return:
6423  *	true (>0) if we indeed boosted the target task.
6424  *	false (0) if we failed to boost the target.
6425  *	-ESRCH if there's no task to yield to.
6426  */
yield_to(struct task_struct * p,bool preempt)6427 int __sched yield_to(struct task_struct *p, bool preempt)
6428 {
6429 	struct task_struct *curr = current;
6430 	struct rq *rq, *p_rq;
6431 	unsigned long flags;
6432 	int yielded = 0;
6433 
6434 	local_irq_save(flags);
6435 	rq = this_rq();
6436 
6437 again:
6438 	p_rq = task_rq(p);
6439 	/*
6440 	 * If we're the only runnable task on the rq and target rq also
6441 	 * has only one task, there's absolutely no point in yielding.
6442 	 */
6443 	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
6444 		yielded = -ESRCH;
6445 		goto out_irq;
6446 	}
6447 
6448 	double_rq_lock(rq, p_rq);
6449 	if (task_rq(p) != p_rq) {
6450 		double_rq_unlock(rq, p_rq);
6451 		goto again;
6452 	}
6453 
6454 	if (!curr->sched_class->yield_to_task)
6455 		goto out_unlock;
6456 
6457 	if (curr->sched_class != p->sched_class)
6458 		goto out_unlock;
6459 
6460 	if (task_running(p_rq, p) || p->state)
6461 		goto out_unlock;
6462 
6463 	yielded = curr->sched_class->yield_to_task(rq, p);
6464 	if (yielded) {
6465 		schedstat_inc(rq->yld_count);
6466 		/*
6467 		 * Make p's CPU reschedule; pick_next_entity takes care of
6468 		 * fairness.
6469 		 */
6470 		if (preempt && rq != p_rq)
6471 			resched_curr(p_rq);
6472 	}
6473 
6474 out_unlock:
6475 	double_rq_unlock(rq, p_rq);
6476 out_irq:
6477 	local_irq_restore(flags);
6478 
6479 	if (yielded > 0)
6480 		schedule();
6481 
6482 	return yielded;
6483 }
6484 EXPORT_SYMBOL_GPL(yield_to);
6485 
io_schedule_prepare(void)6486 int io_schedule_prepare(void)
6487 {
6488 	int old_iowait = current->in_iowait;
6489 
6490 	current->in_iowait = 1;
6491 	blk_schedule_flush_plug(current);
6492 
6493 	return old_iowait;
6494 }
6495 
io_schedule_finish(int token)6496 void io_schedule_finish(int token)
6497 {
6498 	current->in_iowait = token;
6499 }
6500 
6501 /*
6502  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
6503  * that process accounting knows that this is a task in IO wait state.
6504  */
io_schedule_timeout(long timeout)6505 long __sched io_schedule_timeout(long timeout)
6506 {
6507 	int token;
6508 	long ret;
6509 
6510 	token = io_schedule_prepare();
6511 	ret = schedule_timeout(timeout);
6512 	io_schedule_finish(token);
6513 
6514 	return ret;
6515 }
6516 EXPORT_SYMBOL(io_schedule_timeout);
6517 
io_schedule(void)6518 void __sched io_schedule(void)
6519 {
6520 	int token;
6521 
6522 	token = io_schedule_prepare();
6523 	schedule();
6524 	io_schedule_finish(token);
6525 }
6526 EXPORT_SYMBOL(io_schedule);
6527 
6528 /**
6529  * sys_sched_get_priority_max - return maximum RT priority.
6530  * @policy: scheduling class.
6531  *
6532  * Return: On success, this syscall returns the maximum
6533  * rt_priority that can be used by a given scheduling class.
6534  * On failure, a negative error code is returned.
6535  */
SYSCALL_DEFINE1(sched_get_priority_max,int,policy)6536 SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
6537 {
6538 	int ret = -EINVAL;
6539 
6540 	switch (policy) {
6541 	case SCHED_FIFO:
6542 	case SCHED_RR:
6543 		ret = MAX_USER_RT_PRIO-1;
6544 		break;
6545 	case SCHED_DEADLINE:
6546 	case SCHED_NORMAL:
6547 	case SCHED_BATCH:
6548 	case SCHED_IDLE:
6549 		ret = 0;
6550 		break;
6551 	}
6552 	return ret;
6553 }
6554 
6555 /**
6556  * sys_sched_get_priority_min - return minimum RT priority.
6557  * @policy: scheduling class.
6558  *
6559  * Return: On success, this syscall returns the minimum
6560  * rt_priority that can be used by a given scheduling class.
6561  * On failure, a negative error code is returned.
6562  */
SYSCALL_DEFINE1(sched_get_priority_min,int,policy)6563 SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
6564 {
6565 	int ret = -EINVAL;
6566 
6567 	switch (policy) {
6568 	case SCHED_FIFO:
6569 	case SCHED_RR:
6570 		ret = 1;
6571 		break;
6572 	case SCHED_DEADLINE:
6573 	case SCHED_NORMAL:
6574 	case SCHED_BATCH:
6575 	case SCHED_IDLE:
6576 		ret = 0;
6577 	}
6578 	return ret;
6579 }
6580 
sched_rr_get_interval(pid_t pid,struct timespec64 * t)6581 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
6582 {
6583 	struct task_struct *p;
6584 	unsigned int time_slice;
6585 	struct rq_flags rf;
6586 	struct rq *rq;
6587 	int retval;
6588 
6589 	if (pid < 0)
6590 		return -EINVAL;
6591 
6592 	retval = -ESRCH;
6593 	rcu_read_lock();
6594 	p = find_process_by_pid(pid);
6595 	if (!p)
6596 		goto out_unlock;
6597 
6598 	retval = security_task_getscheduler(p);
6599 	if (retval)
6600 		goto out_unlock;
6601 
6602 	rq = task_rq_lock(p, &rf);
6603 	time_slice = 0;
6604 	if (p->sched_class->get_rr_interval)
6605 		time_slice = p->sched_class->get_rr_interval(rq, p);
6606 	task_rq_unlock(rq, p, &rf);
6607 
6608 	rcu_read_unlock();
6609 	jiffies_to_timespec64(time_slice, t);
6610 	return 0;
6611 
6612 out_unlock:
6613 	rcu_read_unlock();
6614 	return retval;
6615 }
6616 
6617 /**
6618  * sys_sched_rr_get_interval - return the default timeslice of a process.
6619  * @pid: pid of the process.
6620  * @interval: userspace pointer to the timeslice value.
6621  *
6622  * this syscall writes the default timeslice value of a given process
6623  * into the user-space timespec buffer. A value of '0' means infinity.
6624  *
6625  * Return: On success, 0 and the timeslice is in @interval. Otherwise,
6626  * an error code.
6627  */
SYSCALL_DEFINE2(sched_rr_get_interval,pid_t,pid,struct __kernel_timespec __user *,interval)6628 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
6629 		struct __kernel_timespec __user *, interval)
6630 {
6631 	struct timespec64 t;
6632 	int retval = sched_rr_get_interval(pid, &t);
6633 
6634 	if (retval == 0)
6635 		retval = put_timespec64(&t, interval);
6636 
6637 	return retval;
6638 }
6639 
6640 #ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE2(sched_rr_get_interval_time32,pid_t,pid,struct old_timespec32 __user *,interval)6641 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
6642 		struct old_timespec32 __user *, interval)
6643 {
6644 	struct timespec64 t;
6645 	int retval = sched_rr_get_interval(pid, &t);
6646 
6647 	if (retval == 0)
6648 		retval = put_old_timespec32(&t, interval);
6649 	return retval;
6650 }
6651 #endif
6652 
sched_show_task(struct task_struct * p)6653 void sched_show_task(struct task_struct *p)
6654 {
6655 	unsigned long free = 0;
6656 	int ppid;
6657 
6658 	if (!try_get_task_stack(p))
6659 		return;
6660 
6661 	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
6662 
6663 	if (p->state == TASK_RUNNING)
6664 		pr_cont("  running task    ");
6665 #ifdef CONFIG_DEBUG_STACK_USAGE
6666 	free = stack_not_used(p);
6667 #endif
6668 	ppid = 0;
6669 	rcu_read_lock();
6670 	if (pid_alive(p))
6671 		ppid = task_pid_nr(rcu_dereference(p->real_parent));
6672 	rcu_read_unlock();
6673 	pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n",
6674 		free, task_pid_nr(p), ppid,
6675 		(unsigned long)task_thread_info(p)->flags);
6676 
6677 	print_worker_info(KERN_INFO, p);
6678 	show_stack(p, NULL, KERN_INFO);
6679 	put_task_stack(p);
6680 }
6681 EXPORT_SYMBOL_GPL(sched_show_task);
6682 
6683 static inline bool
state_filter_match(unsigned long state_filter,struct task_struct * p)6684 state_filter_match(unsigned long state_filter, struct task_struct *p)
6685 {
6686 	/* no filter, everything matches */
6687 	if (!state_filter)
6688 		return true;
6689 
6690 	/* filter, but doesn't match */
6691 	if (!(p->state & state_filter))
6692 		return false;
6693 
6694 	/*
6695 	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
6696 	 * TASK_KILLABLE).
6697 	 */
6698 	if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
6699 		return false;
6700 
6701 	return true;
6702 }
6703 
6704 
show_state_filter(unsigned long state_filter)6705 void show_state_filter(unsigned long state_filter)
6706 {
6707 	struct task_struct *g, *p;
6708 
6709 	rcu_read_lock();
6710 	for_each_process_thread(g, p) {
6711 		/*
6712 		 * reset the NMI-timeout, listing all files on a slow
6713 		 * console might take a lot of time:
6714 		 * Also, reset softlockup watchdogs on all CPUs, because
6715 		 * another CPU might be blocked waiting for us to process
6716 		 * an IPI.
6717 		 */
6718 		touch_nmi_watchdog();
6719 		touch_all_softlockup_watchdogs();
6720 		if (state_filter_match(state_filter, p))
6721 			sched_show_task(p);
6722 	}
6723 
6724 #ifdef CONFIG_SCHED_DEBUG
6725 	if (!state_filter)
6726 		sysrq_sched_debug_show();
6727 #endif
6728 	rcu_read_unlock();
6729 	/*
6730 	 * Only show locks if all tasks are dumped:
6731 	 */
6732 	if (!state_filter)
6733 		debug_show_all_locks();
6734 }
6735 
6736 /**
6737  * init_idle - set up an idle thread for a given CPU
6738  * @idle: task in question
6739  * @cpu: CPU the idle task belongs to
6740  *
6741  * NOTE: this function does not set the idle thread's NEED_RESCHED
6742  * flag, to make booting more robust.
6743  */
init_idle(struct task_struct * idle,int cpu)6744 void __init init_idle(struct task_struct *idle, int cpu)
6745 {
6746 	struct rq *rq = cpu_rq(cpu);
6747 	unsigned long flags;
6748 
6749 	__sched_fork(0, idle);
6750 
6751 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
6752 	raw_spin_lock(&rq->lock);
6753 
6754 	idle->state = TASK_RUNNING;
6755 	idle->se.exec_start = sched_clock();
6756 	idle->flags |= PF_IDLE;
6757 
6758 #ifdef CONFIG_SMP
6759 	/*
6760 	 * Its possible that init_idle() gets called multiple times on a task,
6761 	 * in that case do_set_cpus_allowed() will not do the right thing.
6762 	 *
6763 	 * And since this is boot we can forgo the serialization.
6764 	 */
6765 	set_cpus_allowed_common(idle, cpumask_of(cpu));
6766 #endif
6767 	/*
6768 	 * We're having a chicken and egg problem, even though we are
6769 	 * holding rq->lock, the CPU isn't yet set to this CPU so the
6770 	 * lockdep check in task_group() will fail.
6771 	 *
6772 	 * Similar case to sched_fork(). / Alternatively we could
6773 	 * use task_rq_lock() here and obtain the other rq->lock.
6774 	 *
6775 	 * Silence PROVE_RCU
6776 	 */
6777 	rcu_read_lock();
6778 	__set_task_cpu(idle, cpu);
6779 	rcu_read_unlock();
6780 
6781 	rq->idle = idle;
6782 	rcu_assign_pointer(rq->curr, idle);
6783 	idle->on_rq = TASK_ON_RQ_QUEUED;
6784 #ifdef CONFIG_SMP
6785 	idle->on_cpu = 1;
6786 #endif
6787 	raw_spin_unlock(&rq->lock);
6788 	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
6789 
6790 	/* Set the preempt count _outside_ the spinlocks! */
6791 	init_idle_preempt_count(idle, cpu);
6792 
6793 	/*
6794 	 * The idle tasks have their own, simple scheduling class:
6795 	 */
6796 	idle->sched_class = &idle_sched_class;
6797 	ftrace_graph_init_idle_task(idle, cpu);
6798 	vtime_init_idle(idle, cpu);
6799 #ifdef CONFIG_SMP
6800 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
6801 #endif
6802 }
6803 
6804 #ifdef CONFIG_SMP
6805 
cpuset_cpumask_can_shrink(const struct cpumask * cur,const struct cpumask * trial)6806 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
6807 			      const struct cpumask *trial)
6808 {
6809 	int ret = 1;
6810 
6811 	if (!cpumask_weight(cur))
6812 		return ret;
6813 
6814 	ret = dl_cpuset_cpumask_can_shrink(cur, trial);
6815 
6816 	return ret;
6817 }
6818 
task_can_attach(struct task_struct * p,const struct cpumask * cs_cpus_allowed)6819 int task_can_attach(struct task_struct *p,
6820 		    const struct cpumask *cs_cpus_allowed)
6821 {
6822 	int ret = 0;
6823 
6824 	/*
6825 	 * Kthreads which disallow setaffinity shouldn't be moved
6826 	 * to a new cpuset; we don't want to change their CPU
6827 	 * affinity and isolating such threads by their set of
6828 	 * allowed nodes is unnecessary.  Thus, cpusets are not
6829 	 * applicable for such threads.  This prevents checking for
6830 	 * success of set_cpus_allowed_ptr() on all attached tasks
6831 	 * before cpus_mask may be changed.
6832 	 */
6833 	if (p->flags & PF_NO_SETAFFINITY) {
6834 		ret = -EINVAL;
6835 		goto out;
6836 	}
6837 
6838 	if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
6839 					      cs_cpus_allowed))
6840 		ret = dl_task_can_attach(p, cs_cpus_allowed);
6841 
6842 out:
6843 	return ret;
6844 }
6845 
6846 bool sched_smp_initialized __read_mostly;
6847 
6848 #ifdef CONFIG_NUMA_BALANCING
6849 /* Migrate current task p to target_cpu */
migrate_task_to(struct task_struct * p,int target_cpu)6850 int migrate_task_to(struct task_struct *p, int target_cpu)
6851 {
6852 	struct migration_arg arg = { p, target_cpu };
6853 	int curr_cpu = task_cpu(p);
6854 
6855 	if (curr_cpu == target_cpu)
6856 		return 0;
6857 
6858 	if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
6859 		return -EINVAL;
6860 
6861 	/* TODO: This is not properly updating schedstats */
6862 
6863 	trace_sched_move_numa(p, curr_cpu, target_cpu);
6864 	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
6865 }
6866 
6867 /*
6868  * Requeue a task on a given node and accurately track the number of NUMA
6869  * tasks on the runqueues
6870  */
sched_setnuma(struct task_struct * p,int nid)6871 void sched_setnuma(struct task_struct *p, int nid)
6872 {
6873 	bool queued, running;
6874 	struct rq_flags rf;
6875 	struct rq *rq;
6876 
6877 	rq = task_rq_lock(p, &rf);
6878 	queued = task_on_rq_queued(p);
6879 	running = task_current(rq, p);
6880 
6881 	if (queued)
6882 		dequeue_task(rq, p, DEQUEUE_SAVE);
6883 	if (running)
6884 		put_prev_task(rq, p);
6885 
6886 	p->numa_preferred_nid = nid;
6887 
6888 	if (queued)
6889 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
6890 	if (running)
6891 		set_next_task(rq, p);
6892 	task_rq_unlock(rq, p, &rf);
6893 }
6894 #endif /* CONFIG_NUMA_BALANCING */
6895 
6896 #ifdef CONFIG_HOTPLUG_CPU
6897 /*
6898  * Ensure that the idle task is using init_mm right before its CPU goes
6899  * offline.
6900  */
idle_task_exit(void)6901 void idle_task_exit(void)
6902 {
6903 	struct mm_struct *mm = current->active_mm;
6904 
6905 	BUG_ON(cpu_online(smp_processor_id()));
6906 	BUG_ON(current != this_rq()->idle);
6907 
6908 	if (mm != &init_mm) {
6909 		switch_mm(mm, &init_mm, current);
6910 		finish_arch_post_lock_switch();
6911 	}
6912 
6913 	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
6914 }
6915 
6916 /*
6917  * Since this CPU is going 'away' for a while, fold any nr_active delta
6918  * we might have. Assumes we're called after migrate_tasks() so that the
6919  * nr_active count is stable. We need to take the teardown thread which
6920  * is calling this into account, so we hand in adjust = 1 to the load
6921  * calculation.
6922  *
6923  * Also see the comment "Global load-average calculations".
6924  */
calc_load_migrate(struct rq * rq)6925 static void calc_load_migrate(struct rq *rq)
6926 {
6927 	long delta = calc_load_fold_active(rq, 1);
6928 	if (delta)
6929 		atomic_long_add(delta, &calc_load_tasks);
6930 }
6931 
__pick_migrate_task(struct rq * rq)6932 static struct task_struct *__pick_migrate_task(struct rq *rq)
6933 {
6934 	const struct sched_class *class;
6935 	struct task_struct *next;
6936 
6937 	for_each_class(class) {
6938 		next = class->pick_next_task(rq);
6939 		if (next) {
6940 			next->sched_class->put_prev_task(rq, next);
6941 			return next;
6942 		}
6943 	}
6944 
6945 	/* The idle class should always have a runnable task */
6946 	BUG();
6947 }
6948 
6949 #ifdef CONFIG_CPU_ISOLATION_OPT
6950 /*
6951  * Remove a task from the runqueue and pretend that it's migrating. This
6952  * should prevent migrations for the detached task and disallow further
6953  * changes to tsk_cpus_allowed.
6954  */
6955 static void
detach_one_task_core(struct task_struct * p,struct rq * rq,struct list_head * tasks)6956 detach_one_task_core(struct task_struct *p, struct rq *rq,
6957 		     struct list_head *tasks)
6958 {
6959 	lockdep_assert_held(&rq->lock);
6960 
6961 	p->on_rq = TASK_ON_RQ_MIGRATING;
6962 	deactivate_task(rq, p, 0);
6963 	list_add(&p->se.group_node, tasks);
6964 }
6965 
attach_tasks_core(struct list_head * tasks,struct rq * rq)6966 static void attach_tasks_core(struct list_head *tasks, struct rq *rq)
6967 {
6968 	struct task_struct *p;
6969 
6970 	lockdep_assert_held(&rq->lock);
6971 
6972 	while (!list_empty(tasks)) {
6973 		p = list_first_entry(tasks, struct task_struct, se.group_node);
6974 		list_del_init(&p->se.group_node);
6975 
6976 		BUG_ON(task_rq(p) != rq);
6977 		activate_task(rq, p, 0);
6978 		p->on_rq = TASK_ON_RQ_QUEUED;
6979 	}
6980 }
6981 
6982 #else
6983 
6984 static void
detach_one_task_core(struct task_struct * p,struct rq * rq,struct list_head * tasks)6985 detach_one_task_core(struct task_struct *p, struct rq *rq,
6986 		     struct list_head *tasks)
6987 {
6988 }
6989 
attach_tasks_core(struct list_head * tasks,struct rq * rq)6990 static void attach_tasks_core(struct list_head *tasks, struct rq *rq)
6991 {
6992 }
6993 
6994 #endif /* CONFIG_CPU_ISOLATION_OPT */
6995 
6996 /*
6997  * Migrate all tasks (not pinned if pinned argument say so) from the rq,
6998  * sleeping tasks will be migrated by try_to_wake_up()->select_task_rq().
6999  *
7000  * Called with rq->lock held even though we'er in stop_machine() and
7001  * there's no concurrency possible, we hold the required locks anyway
7002  * because of lock validation efforts.
7003  */
migrate_tasks(struct rq * dead_rq,struct rq_flags * rf,bool migrate_pinned_tasks)7004 void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf,
7005 			  bool migrate_pinned_tasks)
7006 {
7007 	struct rq *rq = dead_rq;
7008 	struct task_struct *next, *stop = rq->stop;
7009 	struct rq_flags orf = *rf;
7010 	int dest_cpu;
7011 	unsigned int num_pinned_kthreads = 1; /* this thread */
7012 	LIST_HEAD(tasks);
7013 	cpumask_t avail_cpus;
7014 
7015 #ifdef CONFIG_CPU_ISOLATION_OPT
7016 	cpumask_andnot(&avail_cpus, cpu_online_mask, cpu_isolated_mask);
7017 #else
7018 	cpumask_copy(&avail_cpus, cpu_online_mask);
7019 #endif
7020 
7021 	/*
7022 	 * Fudge the rq selection such that the below task selection loop
7023 	 * doesn't get stuck on the currently eligible stop task.
7024 	 *
7025 	 * We're currently inside stop_machine() and the rq is either stuck
7026 	 * in the stop_machine_cpu_stop() loop, or we're executing this code,
7027 	 * either way we should never end up calling schedule() until we're
7028 	 * done here.
7029 	 */
7030 	rq->stop = NULL;
7031 
7032 	/*
7033 	 * put_prev_task() and pick_next_task() sched
7034 	 * class method both need to have an up-to-date
7035 	 * value of rq->clock[_task]
7036 	 */
7037 	update_rq_clock(rq);
7038 
7039 	for (;;) {
7040 		/*
7041 		 * There's this thread running, bail when that's the only
7042 		 * remaining thread.
7043 		 */
7044 		if (rq->nr_running == 1)
7045 			break;
7046 
7047 		next = __pick_migrate_task(rq);
7048 
7049 		if (!migrate_pinned_tasks && next->flags & PF_KTHREAD &&
7050 			!cpumask_intersects(&avail_cpus, &next->cpus_mask)) {
7051 			detach_one_task_core(next, rq, &tasks);
7052 			num_pinned_kthreads += 1;
7053 			continue;
7054 		}
7055 
7056 		/*
7057 		 * Rules for changing task_struct::cpus_mask are holding
7058 		 * both pi_lock and rq->lock, such that holding either
7059 		 * stabilizes the mask.
7060 		 *
7061 		 * Drop rq->lock is not quite as disastrous as it usually is
7062 		 * because !cpu_active at this point, which means load-balance
7063 		 * will not interfere. Also, stop-machine.
7064 		 */
7065 		rq_unlock(rq, rf);
7066 		raw_spin_lock(&next->pi_lock);
7067 		rq_relock(rq, rf);
7068 		if (!(rq->clock_update_flags & RQCF_UPDATED))
7069 			update_rq_clock(rq);
7070 
7071 		/*
7072 		 * Since we're inside stop-machine, _nothing_ should have
7073 		 * changed the task, WARN if weird stuff happened, because in
7074 		 * that case the above rq->lock drop is a fail too.
7075 		 * However, during cpu isolation the load balancer might have
7076 		 * interferred since we don't stop all CPUs. Ignore warning for
7077 		 * this case.
7078 		 */
7079 		if (task_rq(next) != rq || !task_on_rq_queued(next)) {
7080 			WARN_ON(migrate_pinned_tasks);
7081 			raw_spin_unlock(&next->pi_lock);
7082 			continue;
7083 		}
7084 
7085 		/* Find suitable destination for @next, with force if needed. */
7086 #ifdef CONFIG_CPU_ISOLATION_OPT
7087 		dest_cpu = select_fallback_rq(dead_rq->cpu, next, false);
7088 #else
7089 		dest_cpu = select_fallback_rq(dead_rq->cpu, next);
7090 #endif
7091 		rq = __migrate_task(rq, rf, next, dest_cpu);
7092 		if (rq != dead_rq) {
7093 			rq_unlock(rq, rf);
7094 			rq = dead_rq;
7095 			*rf = orf;
7096 			rq_relock(rq, rf);
7097 			if (!(rq->clock_update_flags & RQCF_UPDATED))
7098 				update_rq_clock(rq);
7099 		}
7100 		raw_spin_unlock(&next->pi_lock);
7101 	}
7102 
7103 	rq->stop = stop;
7104 
7105 	if (num_pinned_kthreads > 1)
7106 		attach_tasks_core(&tasks, rq);
7107 }
7108 
7109 #ifdef CONFIG_SCHED_EAS
clear_eas_migration_request(int cpu)7110 static void clear_eas_migration_request(int cpu)
7111 {
7112 	struct rq *rq = cpu_rq(cpu);
7113 	unsigned long flags;
7114 
7115 	clear_reserved(cpu);
7116 	if (rq->push_task) {
7117 		struct task_struct *push_task = NULL;
7118 
7119 		raw_spin_lock_irqsave(&rq->lock, flags);
7120 		if (rq->push_task) {
7121 			clear_reserved(rq->push_cpu);
7122 			push_task = rq->push_task;
7123 			rq->push_task = NULL;
7124 		}
7125 		rq->active_balance = 0;
7126 		raw_spin_unlock_irqrestore(&rq->lock, flags);
7127 		if (push_task)
7128 			put_task_struct(push_task);
7129 	}
7130 }
7131 #else
clear_eas_migration_request(int cpu)7132 static inline void clear_eas_migration_request(int cpu) {}
7133 #endif
7134 
7135 #ifdef CONFIG_CPU_ISOLATION_OPT
do_isolation_work_cpu_stop(void * data)7136 int do_isolation_work_cpu_stop(void *data)
7137 {
7138 	unsigned int cpu = smp_processor_id();
7139 	struct rq *rq = cpu_rq(cpu);
7140 	struct rq_flags rf;
7141 
7142 	watchdog_disable(cpu);
7143 
7144 	local_irq_disable();
7145 
7146 	irq_migrate_all_off_this_cpu();
7147 
7148 	flush_smp_call_function_from_idle();
7149 
7150 	/* Update our root-domain */
7151 	rq_lock(rq, &rf);
7152 
7153 	/*
7154 	 * Temporarily mark the rq as offline. This will allow us to
7155 	 * move tasks off the CPU.
7156 	 */
7157 	if (rq->rd) {
7158 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7159 		set_rq_offline(rq);
7160 	}
7161 
7162 	migrate_tasks(rq, &rf, false);
7163 
7164 	if (rq->rd)
7165 		set_rq_online(rq);
7166 	rq_unlock(rq, &rf);
7167 
7168 	clear_eas_migration_request(cpu);
7169 	local_irq_enable();
7170 	return 0;
7171 }
7172 
do_unisolation_work_cpu_stop(void * data)7173 int do_unisolation_work_cpu_stop(void *data)
7174 {
7175 	watchdog_enable(smp_processor_id());
7176 	return 0;
7177 }
7178 
sched_update_group_capacities(int cpu)7179 static void sched_update_group_capacities(int cpu)
7180 {
7181 	struct sched_domain *sd;
7182 
7183 	mutex_lock(&sched_domains_mutex);
7184 	rcu_read_lock();
7185 
7186 	for_each_domain(cpu, sd) {
7187 		int balance_cpu = group_balance_cpu(sd->groups);
7188 
7189 		init_sched_groups_capacity(cpu, sd);
7190 		/*
7191 		 * Need to ensure this is also called with balancing
7192 		 * cpu.
7193 		 */
7194 		if (cpu != balance_cpu)
7195 			init_sched_groups_capacity(balance_cpu, sd);
7196 	}
7197 
7198 	rcu_read_unlock();
7199 	mutex_unlock(&sched_domains_mutex);
7200 }
7201 
7202 static unsigned int cpu_isolation_vote[NR_CPUS];
7203 
sched_isolate_count(const cpumask_t * mask,bool include_offline)7204 int sched_isolate_count(const cpumask_t *mask, bool include_offline)
7205 {
7206 	cpumask_t count_mask = CPU_MASK_NONE;
7207 
7208 	if (include_offline) {
7209 		cpumask_complement(&count_mask, cpu_online_mask);
7210 		cpumask_or(&count_mask, &count_mask, cpu_isolated_mask);
7211 		cpumask_and(&count_mask, &count_mask, mask);
7212 	} else {
7213 		cpumask_and(&count_mask, mask, cpu_isolated_mask);
7214 	}
7215 
7216 	return cpumask_weight(&count_mask);
7217 }
7218 
7219 /*
7220  * 1) CPU is isolated and cpu is offlined:
7221  *	Unisolate the core.
7222  * 2) CPU is not isolated and CPU is offlined:
7223  *	No action taken.
7224  * 3) CPU is offline and request to isolate
7225  *	Request ignored.
7226  * 4) CPU is offline and isolated:
7227  *	Not a possible state.
7228  * 5) CPU is online and request to isolate
7229  *	Normal case: Isolate the CPU
7230  * 6) CPU is not isolated and comes back online
7231  *	Nothing to do
7232  *
7233  * Note: The client calling sched_isolate_cpu() is repsonsible for ONLY
7234  * calling sched_unisolate_cpu() on a CPU that the client previously isolated.
7235  * Client is also responsible for unisolating when a core goes offline
7236  * (after CPU is marked offline).
7237  */
sched_isolate_cpu(int cpu)7238 int sched_isolate_cpu(int cpu)
7239 {
7240 	struct rq *rq;
7241 	cpumask_t avail_cpus;
7242 	int ret_code = 0;
7243 	u64 start_time = 0;
7244 
7245 	if (trace_sched_isolate_enabled())
7246 		start_time = sched_clock();
7247 
7248 	cpu_maps_update_begin();
7249 
7250 	cpumask_andnot(&avail_cpus, cpu_online_mask, cpu_isolated_mask);
7251 
7252 	if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_possible(cpu) ||
7253 				!cpu_online(cpu) || cpu >= NR_CPUS) {
7254 		ret_code = -EINVAL;
7255 		goto out;
7256 	}
7257 
7258 	rq = cpu_rq(cpu);
7259 
7260 	if (++cpu_isolation_vote[cpu] > 1)
7261 		goto out;
7262 
7263 	/* We cannot isolate ALL cpus in the system */
7264 	if (cpumask_weight(&avail_cpus) == 1) {
7265 		--cpu_isolation_vote[cpu];
7266 		ret_code = -EINVAL;
7267 		goto out;
7268 	}
7269 
7270 	/*
7271 	 * There is a race between watchdog being enabled by hotplug and
7272 	 * core isolation disabling the watchdog. When a CPU is hotplugged in
7273 	 * and the hotplug lock has been released the watchdog thread might
7274 	 * not have run yet to enable the watchdog.
7275 	 * We have to wait for the watchdog to be enabled before proceeding.
7276 	 */
7277 	if (!watchdog_configured(cpu)) {
7278 		msleep(20);
7279 		if (!watchdog_configured(cpu)) {
7280 			--cpu_isolation_vote[cpu];
7281 			ret_code = -EBUSY;
7282 			goto out;
7283 		}
7284 	}
7285 
7286 	set_cpu_isolated(cpu, true);
7287 	cpumask_clear_cpu(cpu, &avail_cpus);
7288 
7289 	/* Migrate timers */
7290 	smp_call_function_any(&avail_cpus, hrtimer_quiesce_cpu, &cpu, 1);
7291 	smp_call_function_any(&avail_cpus, timer_quiesce_cpu, &cpu, 1);
7292 
7293 	watchdog_disable(cpu);
7294 	irq_lock_sparse();
7295 	stop_cpus(cpumask_of(cpu), do_isolation_work_cpu_stop, 0);
7296 	irq_unlock_sparse();
7297 
7298 	calc_load_migrate(rq);
7299 	update_max_interval();
7300 	sched_update_group_capacities(cpu);
7301 
7302 out:
7303 	cpu_maps_update_done();
7304 	trace_sched_isolate(cpu, cpumask_bits(cpu_isolated_mask)[0],
7305 			    start_time, 1);
7306 	return ret_code;
7307 }
7308 
7309 /*
7310  * Note: The client calling sched_isolate_cpu() is repsonsible for ONLY
7311  * calling sched_unisolate_cpu() on a CPU that the client previously isolated.
7312  * Client is also responsible for unisolating when a core goes offline
7313  * (after CPU is marked offline).
7314  */
sched_unisolate_cpu_unlocked(int cpu)7315 int sched_unisolate_cpu_unlocked(int cpu)
7316 {
7317 	int ret_code = 0;
7318 	u64 start_time = 0;
7319 
7320 	if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_possible(cpu)
7321 						|| cpu >= NR_CPUS) {
7322 		ret_code = -EINVAL;
7323 		goto out;
7324 	}
7325 
7326 	if (trace_sched_isolate_enabled())
7327 		start_time = sched_clock();
7328 
7329 	if (!cpu_isolation_vote[cpu]) {
7330 		ret_code = -EINVAL;
7331 		goto out;
7332 	}
7333 
7334 	if (--cpu_isolation_vote[cpu])
7335 		goto out;
7336 
7337 	set_cpu_isolated(cpu, false);
7338 	update_max_interval();
7339 	sched_update_group_capacities(cpu);
7340 
7341 	if (cpu_online(cpu)) {
7342 		stop_cpus(cpumask_of(cpu), do_unisolation_work_cpu_stop, 0);
7343 
7344 		/* Kick CPU to immediately do load balancing */
7345 		if (!atomic_fetch_or(NOHZ_KICK_MASK, nohz_flags(cpu)))
7346 			smp_send_reschedule(cpu);
7347 	}
7348 
7349 out:
7350 	trace_sched_isolate(cpu, cpumask_bits(cpu_isolated_mask)[0],
7351 			    start_time, 0);
7352 	return ret_code;
7353 }
7354 
sched_unisolate_cpu(int cpu)7355 int sched_unisolate_cpu(int cpu)
7356 {
7357 	int ret_code;
7358 
7359 	cpu_maps_update_begin();
7360 	ret_code = sched_unisolate_cpu_unlocked(cpu);
7361 	cpu_maps_update_done();
7362 	return ret_code;
7363 }
7364 
7365 #endif /* CONFIG_CPU_ISOLATION_OPT */
7366 
7367 #endif /* CONFIG_HOTPLUG_CPU */
7368 
set_rq_online(struct rq * rq)7369 void set_rq_online(struct rq *rq)
7370 {
7371 	if (!rq->online) {
7372 		const struct sched_class *class;
7373 
7374 		cpumask_set_cpu(rq->cpu, rq->rd->online);
7375 		rq->online = 1;
7376 
7377 		for_each_class(class) {
7378 			if (class->rq_online)
7379 				class->rq_online(rq);
7380 		}
7381 	}
7382 }
7383 
set_rq_offline(struct rq * rq)7384 void set_rq_offline(struct rq *rq)
7385 {
7386 	if (rq->online) {
7387 		const struct sched_class *class;
7388 
7389 		for_each_class(class) {
7390 			if (class->rq_offline)
7391 				class->rq_offline(rq);
7392 		}
7393 
7394 		cpumask_clear_cpu(rq->cpu, rq->rd->online);
7395 		rq->online = 0;
7396 	}
7397 }
7398 
7399 /*
7400  * used to mark begin/end of suspend/resume:
7401  */
7402 static int num_cpus_frozen;
7403 
7404 /*
7405  * Update cpusets according to cpu_active mask.  If cpusets are
7406  * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7407  * around partition_sched_domains().
7408  *
7409  * If we come here as part of a suspend/resume, don't touch cpusets because we
7410  * want to restore it back to its original state upon resume anyway.
7411  */
cpuset_cpu_active(void)7412 static void cpuset_cpu_active(void)
7413 {
7414 	if (cpuhp_tasks_frozen) {
7415 		/*
7416 		 * num_cpus_frozen tracks how many CPUs are involved in suspend
7417 		 * resume sequence. As long as this is not the last online
7418 		 * operation in the resume sequence, just build a single sched
7419 		 * domain, ignoring cpusets.
7420 		 */
7421 		partition_sched_domains(1, NULL, NULL);
7422 		if (--num_cpus_frozen)
7423 			return;
7424 		/*
7425 		 * This is the last CPU online operation. So fall through and
7426 		 * restore the original sched domains by considering the
7427 		 * cpuset configurations.
7428 		 */
7429 		cpuset_force_rebuild();
7430 	}
7431 	cpuset_update_active_cpus();
7432 }
7433 
cpuset_cpu_inactive(unsigned int cpu)7434 static int cpuset_cpu_inactive(unsigned int cpu)
7435 {
7436 	if (!cpuhp_tasks_frozen) {
7437 		if (dl_cpu_busy(cpu))
7438 			return -EBUSY;
7439 		cpuset_update_active_cpus();
7440 	} else {
7441 		num_cpus_frozen++;
7442 		partition_sched_domains(1, NULL, NULL);
7443 	}
7444 	return 0;
7445 }
7446 
sched_cpu_activate(unsigned int cpu)7447 int sched_cpu_activate(unsigned int cpu)
7448 {
7449 	struct rq *rq = cpu_rq(cpu);
7450 	struct rq_flags rf;
7451 
7452 #ifdef CONFIG_SCHED_SMT
7453 	/*
7454 	 * When going up, increment the number of cores with SMT present.
7455 	 */
7456 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
7457 		static_branch_inc_cpuslocked(&sched_smt_present);
7458 #endif
7459 	set_cpu_active(cpu, true);
7460 
7461 	if (sched_smp_initialized) {
7462 		sched_domains_numa_masks_set(cpu);
7463 		cpuset_cpu_active();
7464 	}
7465 
7466 	/*
7467 	 * Put the rq online, if not already. This happens:
7468 	 *
7469 	 * 1) In the early boot process, because we build the real domains
7470 	 *    after all CPUs have been brought up.
7471 	 *
7472 	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
7473 	 *    domains.
7474 	 */
7475 	rq_lock_irqsave(rq, &rf);
7476 	if (rq->rd) {
7477 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7478 		set_rq_online(rq);
7479 	}
7480 	rq_unlock_irqrestore(rq, &rf);
7481 
7482 	return 0;
7483 }
7484 
sched_cpu_deactivate(unsigned int cpu)7485 int sched_cpu_deactivate(unsigned int cpu)
7486 {
7487 	int ret;
7488 
7489 	set_cpu_active(cpu, false);
7490 	/*
7491 	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
7492 	 * users of this state to go away such that all new such users will
7493 	 * observe it.
7494 	 *
7495 	 * Do sync before park smpboot threads to take care the rcu boost case.
7496 	 */
7497 	synchronize_rcu();
7498 
7499 #ifdef CONFIG_SCHED_SMT
7500 	/*
7501 	 * When going down, decrement the number of cores with SMT present.
7502 	 */
7503 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
7504 		static_branch_dec_cpuslocked(&sched_smt_present);
7505 #endif
7506 
7507 	if (!sched_smp_initialized)
7508 		return 0;
7509 
7510 	ret = cpuset_cpu_inactive(cpu);
7511 	if (ret) {
7512 		set_cpu_active(cpu, true);
7513 		return ret;
7514 	}
7515 	sched_domains_numa_masks_clear(cpu);
7516 	return 0;
7517 }
7518 
sched_rq_cpu_starting(unsigned int cpu)7519 static void sched_rq_cpu_starting(unsigned int cpu)
7520 {
7521 	struct rq *rq = cpu_rq(cpu);
7522 	unsigned long flags;
7523 
7524 	raw_spin_lock_irqsave(&rq->lock, flags);
7525 	set_window_start(rq);
7526 	raw_spin_unlock_irqrestore(&rq->lock, flags);
7527 
7528 	rq->calc_load_update = calc_load_update;
7529 	update_max_interval();
7530 }
7531 
sched_cpu_starting(unsigned int cpu)7532 int sched_cpu_starting(unsigned int cpu)
7533 {
7534 	sched_rq_cpu_starting(cpu);
7535 	sched_tick_start(cpu);
7536 	clear_eas_migration_request(cpu);
7537 	return 0;
7538 }
7539 
7540 #ifdef CONFIG_HOTPLUG_CPU
sched_cpu_dying(unsigned int cpu)7541 int sched_cpu_dying(unsigned int cpu)
7542 {
7543 	struct rq *rq = cpu_rq(cpu);
7544 	struct rq_flags rf;
7545 
7546 	/* Handle pending wakeups and then migrate everything off */
7547 	sched_tick_stop(cpu);
7548 
7549 	rq_lock_irqsave(rq, &rf);
7550 
7551 	if (rq->rd) {
7552 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7553 		set_rq_offline(rq);
7554 	}
7555 	migrate_tasks(rq, &rf, true);
7556 	BUG_ON(rq->nr_running != 1);
7557 	rq_unlock_irqrestore(rq, &rf);
7558 
7559 	clear_eas_migration_request(cpu);
7560 
7561 	calc_load_migrate(rq);
7562 	update_max_interval();
7563 	nohz_balance_exit_idle(rq);
7564 	hrtick_clear(rq);
7565 	return 0;
7566 }
7567 #endif
7568 
sched_init_smp(void)7569 void __init sched_init_smp(void)
7570 {
7571 	sched_init_numa();
7572 
7573 	/*
7574 	 * There's no userspace yet to cause hotplug operations; hence all the
7575 	 * CPU masks are stable and all blatant races in the below code cannot
7576 	 * happen.
7577 	 */
7578 	mutex_lock(&sched_domains_mutex);
7579 	sched_init_domains(cpu_active_mask);
7580 	mutex_unlock(&sched_domains_mutex);
7581 
7582 	update_cluster_topology();
7583 
7584 	/* Move init over to a non-isolated CPU */
7585 	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
7586 		BUG();
7587 	sched_init_granularity();
7588 
7589 	init_sched_rt_class();
7590 	init_sched_dl_class();
7591 
7592 	sched_smp_initialized = true;
7593 }
7594 
migration_init(void)7595 static int __init migration_init(void)
7596 {
7597 	sched_cpu_starting(smp_processor_id());
7598 	return 0;
7599 }
7600 early_initcall(migration_init);
7601 
7602 #else
sched_init_smp(void)7603 void __init sched_init_smp(void)
7604 {
7605 	sched_init_granularity();
7606 }
7607 #endif /* CONFIG_SMP */
7608 
in_sched_functions(unsigned long addr)7609 int in_sched_functions(unsigned long addr)
7610 {
7611 	return in_lock_functions(addr) ||
7612 		(addr >= (unsigned long)__sched_text_start
7613 		&& addr < (unsigned long)__sched_text_end);
7614 }
7615 
7616 #ifdef CONFIG_CGROUP_SCHED
7617 /*
7618  * Default task group.
7619  * Every task in system belongs to this group at bootup.
7620  */
7621 struct task_group root_task_group;
7622 LIST_HEAD(task_groups);
7623 
7624 /* Cacheline aligned slab cache for task_group */
7625 static struct kmem_cache *task_group_cache __read_mostly;
7626 #endif
7627 
7628 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
7629 DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
7630 
sched_init(void)7631 void __init sched_init(void)
7632 {
7633 	unsigned long ptr = 0;
7634 	int i;
7635 
7636 	/* Make sure the linker didn't screw up */
7637 	BUG_ON(&idle_sched_class + 1 != &fair_sched_class ||
7638 	       &fair_sched_class + 1 != &rt_sched_class ||
7639 	       &rt_sched_class + 1   != &dl_sched_class);
7640 #ifdef CONFIG_SMP
7641 	BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
7642 #endif
7643 
7644 	wait_bit_init();
7645 
7646 	init_clusters();
7647 
7648 #ifdef CONFIG_FAIR_GROUP_SCHED
7649 	ptr += 2 * nr_cpu_ids * sizeof(void **);
7650 #endif
7651 #ifdef CONFIG_RT_GROUP_SCHED
7652 	ptr += 2 * nr_cpu_ids * sizeof(void **);
7653 #endif
7654 	if (ptr) {
7655 		ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
7656 
7657 #ifdef CONFIG_FAIR_GROUP_SCHED
7658 		root_task_group.se = (struct sched_entity **)ptr;
7659 		ptr += nr_cpu_ids * sizeof(void **);
7660 
7661 		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
7662 		ptr += nr_cpu_ids * sizeof(void **);
7663 
7664 		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
7665 		init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
7666 #endif /* CONFIG_FAIR_GROUP_SCHED */
7667 #ifdef CONFIG_RT_GROUP_SCHED
7668 		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
7669 		ptr += nr_cpu_ids * sizeof(void **);
7670 
7671 		root_task_group.rt_rq = (struct rt_rq **)ptr;
7672 		ptr += nr_cpu_ids * sizeof(void **);
7673 
7674 #endif /* CONFIG_RT_GROUP_SCHED */
7675 	}
7676 #ifdef CONFIG_CPUMASK_OFFSTACK
7677 	for_each_possible_cpu(i) {
7678 		per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
7679 			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
7680 		per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(
7681 			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
7682 	}
7683 #endif /* CONFIG_CPUMASK_OFFSTACK */
7684 
7685 	init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
7686 	init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime());
7687 
7688 #ifdef CONFIG_SMP
7689 	init_defrootdomain();
7690 #endif
7691 
7692 #ifdef CONFIG_RT_GROUP_SCHED
7693 	init_rt_bandwidth(&root_task_group.rt_bandwidth,
7694 			global_rt_period(), global_rt_runtime());
7695 #endif /* CONFIG_RT_GROUP_SCHED */
7696 
7697 #ifdef CONFIG_CGROUP_SCHED
7698 	task_group_cache = KMEM_CACHE(task_group, 0);
7699 
7700 	list_add(&root_task_group.list, &task_groups);
7701 	INIT_LIST_HEAD(&root_task_group.children);
7702 	INIT_LIST_HEAD(&root_task_group.siblings);
7703 	autogroup_init(&init_task);
7704 #endif /* CONFIG_CGROUP_SCHED */
7705 
7706 	for_each_possible_cpu(i) {
7707 		struct rq *rq;
7708 
7709 		rq = cpu_rq(i);
7710 		raw_spin_lock_init(&rq->lock);
7711 		rq->nr_running = 0;
7712 		rq->calc_load_active = 0;
7713 		rq->calc_load_update = jiffies + LOAD_FREQ;
7714 		init_cfs_rq(&rq->cfs);
7715 		init_rt_rq(&rq->rt);
7716 		init_dl_rq(&rq->dl);
7717 #ifdef CONFIG_FAIR_GROUP_SCHED
7718 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
7719 		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
7720 		/*
7721 		 * How much CPU bandwidth does root_task_group get?
7722 		 *
7723 		 * In case of task-groups formed thr' the cgroup filesystem, it
7724 		 * gets 100% of the CPU resources in the system. This overall
7725 		 * system CPU resource is divided among the tasks of
7726 		 * root_task_group and its child task-groups in a fair manner,
7727 		 * based on each entity's (task or task-group's) weight
7728 		 * (se->load.weight).
7729 		 *
7730 		 * In other words, if root_task_group has 10 tasks of weight
7731 		 * 1024) and two child groups A0 and A1 (of weight 1024 each),
7732 		 * then A0's share of the CPU resource is:
7733 		 *
7734 		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
7735 		 *
7736 		 * We achieve this by letting root_task_group's tasks sit
7737 		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
7738 		 */
7739 		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
7740 #endif /* CONFIG_FAIR_GROUP_SCHED */
7741 
7742 		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
7743 #ifdef CONFIG_RT_GROUP_SCHED
7744 		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
7745 #endif
7746 #ifdef CONFIG_SMP
7747 		rq->sd = NULL;
7748 		rq->rd = NULL;
7749 		rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
7750 		rq->balance_callback = NULL;
7751 		rq->active_balance = 0;
7752 		rq->next_balance = jiffies;
7753 		rq->push_cpu = 0;
7754 		rq->cpu = i;
7755 		rq->online = 0;
7756 		rq->idle_stamp = 0;
7757 		rq->avg_idle = 2*sysctl_sched_migration_cost;
7758 		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
7759 		walt_sched_init_rq(rq);
7760 
7761 		INIT_LIST_HEAD(&rq->cfs_tasks);
7762 
7763 		rq_attach_root(rq, &def_root_domain);
7764 #ifdef CONFIG_NO_HZ_COMMON
7765 		rq->last_blocked_load_update_tick = jiffies;
7766 		atomic_set(&rq->nohz_flags, 0);
7767 
7768 		rq_csd_init(rq, &rq->nohz_csd, nohz_csd_func);
7769 #endif
7770 #endif /* CONFIG_SMP */
7771 		hrtick_rq_init(rq);
7772 		atomic_set(&rq->nr_iowait, 0);
7773 	}
7774 
7775 	BUG_ON(alloc_related_thread_groups());
7776 	set_load_weight(&init_task, false);
7777 
7778 	/*
7779 	 * The boot idle thread does lazy MMU switching as well:
7780 	 */
7781 	mmgrab(&init_mm);
7782 	enter_lazy_tlb(&init_mm, current);
7783 
7784 	/*
7785 	 * Make us the idle thread. Technically, schedule() should not be
7786 	 * called from this thread, however somewhere below it might be,
7787 	 * but because we are the idle thread, we just pick up running again
7788 	 * when this runqueue becomes "idle".
7789 	 */
7790 	init_idle(current, smp_processor_id());
7791 	init_new_task_load(current);
7792 
7793 	calc_load_update = jiffies + LOAD_FREQ;
7794 
7795 #ifdef CONFIG_SMP
7796 	idle_thread_set_boot_cpu();
7797 #endif
7798 	init_sched_fair_class();
7799 
7800 	init_schedstats();
7801 
7802 	psi_init();
7803 
7804 	init_uclamp();
7805 
7806 	scheduler_running = 1;
7807 }
7808 
7809 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
preempt_count_equals(int preempt_offset)7810 static inline int preempt_count_equals(int preempt_offset)
7811 {
7812 	int nested = preempt_count() + rcu_preempt_depth();
7813 
7814 	return (nested == preempt_offset);
7815 }
7816 
__might_sleep(const char * file,int line,int preempt_offset)7817 void __might_sleep(const char *file, int line, int preempt_offset)
7818 {
7819 	/*
7820 	 * Blocking primitives will set (and therefore destroy) current->state,
7821 	 * since we will exit with TASK_RUNNING make sure we enter with it,
7822 	 * otherwise we will destroy state.
7823 	 */
7824 	WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
7825 			"do not call blocking ops when !TASK_RUNNING; "
7826 			"state=%lx set at [<%p>] %pS\n",
7827 			current->state,
7828 			(void *)current->task_state_change,
7829 			(void *)current->task_state_change);
7830 
7831 	___might_sleep(file, line, preempt_offset);
7832 }
7833 EXPORT_SYMBOL(__might_sleep);
7834 
___might_sleep(const char * file,int line,int preempt_offset)7835 void ___might_sleep(const char *file, int line, int preempt_offset)
7836 {
7837 	/* Ratelimiting timestamp: */
7838 	static unsigned long prev_jiffy;
7839 
7840 	unsigned long preempt_disable_ip;
7841 
7842 	/* WARN_ON_ONCE() by default, no rate limit required: */
7843 	rcu_sleep_check();
7844 
7845 	if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
7846 	     !is_idle_task(current) && !current->non_block_count) ||
7847 	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
7848 	    oops_in_progress)
7849 		return;
7850 
7851 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7852 		return;
7853 	prev_jiffy = jiffies;
7854 
7855 	/* Save this before calling printk(), since that will clobber it: */
7856 	preempt_disable_ip = get_preempt_disable_ip(current);
7857 
7858 	printk(KERN_ERR
7859 		"BUG: sleeping function called from invalid context at %s:%d\n",
7860 			file, line);
7861 	printk(KERN_ERR
7862 		"in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
7863 			in_atomic(), irqs_disabled(), current->non_block_count,
7864 			current->pid, current->comm);
7865 
7866 	if (task_stack_end_corrupted(current))
7867 		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
7868 
7869 	debug_show_held_locks(current);
7870 	if (irqs_disabled())
7871 		print_irqtrace_events(current);
7872 	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
7873 	    && !preempt_count_equals(preempt_offset)) {
7874 		pr_err("Preemption disabled at:");
7875 		print_ip_sym(KERN_ERR, preempt_disable_ip);
7876 	}
7877 	dump_stack();
7878 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
7879 }
7880 EXPORT_SYMBOL(___might_sleep);
7881 
__cant_sleep(const char * file,int line,int preempt_offset)7882 void __cant_sleep(const char *file, int line, int preempt_offset)
7883 {
7884 	static unsigned long prev_jiffy;
7885 
7886 	if (irqs_disabled())
7887 		return;
7888 
7889 	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
7890 		return;
7891 
7892 	if (preempt_count() > preempt_offset)
7893 		return;
7894 
7895 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7896 		return;
7897 	prev_jiffy = jiffies;
7898 
7899 	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
7900 	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
7901 			in_atomic(), irqs_disabled(),
7902 			current->pid, current->comm);
7903 
7904 	debug_show_held_locks(current);
7905 	dump_stack();
7906 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
7907 }
7908 EXPORT_SYMBOL_GPL(__cant_sleep);
7909 #endif
7910 
7911 #ifdef CONFIG_MAGIC_SYSRQ
normalize_rt_tasks(void)7912 void normalize_rt_tasks(void)
7913 {
7914 	struct task_struct *g, *p;
7915 	struct sched_attr attr = {
7916 		.sched_policy = SCHED_NORMAL,
7917 	};
7918 
7919 	read_lock(&tasklist_lock);
7920 	for_each_process_thread(g, p) {
7921 		/*
7922 		 * Only normalize user tasks:
7923 		 */
7924 		if (p->flags & PF_KTHREAD)
7925 			continue;
7926 
7927 		p->se.exec_start = 0;
7928 		schedstat_set(p->se.statistics.wait_start,  0);
7929 		schedstat_set(p->se.statistics.sleep_start, 0);
7930 		schedstat_set(p->se.statistics.block_start, 0);
7931 
7932 		if (!dl_task(p) && !rt_task(p)) {
7933 			/*
7934 			 * Renice negative nice level userspace
7935 			 * tasks back to 0:
7936 			 */
7937 			if (task_nice(p) < 0)
7938 				set_user_nice(p, 0);
7939 			continue;
7940 		}
7941 
7942 		__sched_setscheduler(p, &attr, false, false);
7943 	}
7944 	read_unlock(&tasklist_lock);
7945 }
7946 
7947 #endif /* CONFIG_MAGIC_SYSRQ */
7948 
7949 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
7950 /*
7951  * These functions are only useful for the IA64 MCA handling, or kdb.
7952  *
7953  * They can only be called when the whole system has been
7954  * stopped - every CPU needs to be quiescent, and no scheduling
7955  * activity can take place. Using them for anything else would
7956  * be a serious bug, and as a result, they aren't even visible
7957  * under any other configuration.
7958  */
7959 
7960 /**
7961  * curr_task - return the current task for a given CPU.
7962  * @cpu: the processor in question.
7963  *
7964  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7965  *
7966  * Return: The current task for @cpu.
7967  */
curr_task(int cpu)7968 struct task_struct *curr_task(int cpu)
7969 {
7970 	return cpu_curr(cpu);
7971 }
7972 
7973 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
7974 
7975 #ifdef CONFIG_IA64
7976 /**
7977  * ia64_set_curr_task - set the current task for a given CPU.
7978  * @cpu: the processor in question.
7979  * @p: the task pointer to set.
7980  *
7981  * Description: This function must only be used when non-maskable interrupts
7982  * are serviced on a separate stack. It allows the architecture to switch the
7983  * notion of the current task on a CPU in a non-blocking manner. This function
7984  * must be called with all CPU's synchronized, and interrupts disabled, the
7985  * and caller must save the original value of the current task (see
7986  * curr_task() above) and restore that value before reenabling interrupts and
7987  * re-starting the system.
7988  *
7989  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7990  */
ia64_set_curr_task(int cpu,struct task_struct * p)7991 void ia64_set_curr_task(int cpu, struct task_struct *p)
7992 {
7993 	cpu_curr(cpu) = p;
7994 }
7995 
7996 #endif
7997 
7998 #ifdef CONFIG_CGROUP_SCHED
7999 /* task_group_lock serializes the addition/removal of task groups */
8000 static DEFINE_SPINLOCK(task_group_lock);
8001 
alloc_uclamp_sched_group(struct task_group * tg,struct task_group * parent)8002 static inline void alloc_uclamp_sched_group(struct task_group *tg,
8003 					    struct task_group *parent)
8004 {
8005 #ifdef CONFIG_UCLAMP_TASK_GROUP
8006 	enum uclamp_id clamp_id;
8007 
8008 	for_each_clamp_id(clamp_id) {
8009 		uclamp_se_set(&tg->uclamp_req[clamp_id],
8010 			      uclamp_none(clamp_id), false);
8011 		tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
8012 	}
8013 #endif
8014 }
8015 
sched_free_group(struct task_group * tg)8016 static void sched_free_group(struct task_group *tg)
8017 {
8018 	free_fair_sched_group(tg);
8019 	free_rt_sched_group(tg);
8020 	autogroup_free(tg);
8021 	kmem_cache_free(task_group_cache, tg);
8022 }
8023 
8024 /* allocate runqueue etc for a new task group */
sched_create_group(struct task_group * parent)8025 struct task_group *sched_create_group(struct task_group *parent)
8026 {
8027 	struct task_group *tg;
8028 
8029 	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
8030 	if (!tg)
8031 		return ERR_PTR(-ENOMEM);
8032 
8033 	if (!alloc_fair_sched_group(tg, parent))
8034 		goto err;
8035 
8036 	if (!alloc_rt_sched_group(tg, parent))
8037 		goto err;
8038 
8039 	alloc_uclamp_sched_group(tg, parent);
8040 
8041 	return tg;
8042 
8043 err:
8044 	sched_free_group(tg);
8045 	return ERR_PTR(-ENOMEM);
8046 }
8047 
sched_online_group(struct task_group * tg,struct task_group * parent)8048 void sched_online_group(struct task_group *tg, struct task_group *parent)
8049 {
8050 	unsigned long flags;
8051 
8052 	spin_lock_irqsave(&task_group_lock, flags);
8053 	list_add_rcu(&tg->list, &task_groups);
8054 
8055 	/* Root should already exist: */
8056 	WARN_ON(!parent);
8057 
8058 	tg->parent = parent;
8059 	INIT_LIST_HEAD(&tg->children);
8060 	list_add_rcu(&tg->siblings, &parent->children);
8061 	spin_unlock_irqrestore(&task_group_lock, flags);
8062 
8063 	online_fair_sched_group(tg);
8064 }
8065 
8066 /* rcu callback to free various structures associated with a task group */
sched_free_group_rcu(struct rcu_head * rhp)8067 static void sched_free_group_rcu(struct rcu_head *rhp)
8068 {
8069 	/* Now it should be safe to free those cfs_rqs: */
8070 	sched_free_group(container_of(rhp, struct task_group, rcu));
8071 }
8072 
sched_destroy_group(struct task_group * tg)8073 void sched_destroy_group(struct task_group *tg)
8074 {
8075 	/* Wait for possible concurrent references to cfs_rqs complete: */
8076 	call_rcu(&tg->rcu, sched_free_group_rcu);
8077 }
8078 
sched_offline_group(struct task_group * tg)8079 void sched_offline_group(struct task_group *tg)
8080 {
8081 	unsigned long flags;
8082 
8083 	/* End participation in shares distribution: */
8084 	unregister_fair_sched_group(tg);
8085 
8086 	spin_lock_irqsave(&task_group_lock, flags);
8087 	list_del_rcu(&tg->list);
8088 	list_del_rcu(&tg->siblings);
8089 	spin_unlock_irqrestore(&task_group_lock, flags);
8090 }
8091 
sched_change_group(struct task_struct * tsk,int type)8092 static void sched_change_group(struct task_struct *tsk, int type)
8093 {
8094 	struct task_group *tg;
8095 
8096 	/*
8097 	 * All callers are synchronized by task_rq_lock(); we do not use RCU
8098 	 * which is pointless here. Thus, we pass "true" to task_css_check()
8099 	 * to prevent lockdep warnings.
8100 	 */
8101 	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
8102 			  struct task_group, css);
8103 	tg = autogroup_task_group(tsk, tg);
8104 	tsk->sched_task_group = tg;
8105 
8106 #ifdef CONFIG_FAIR_GROUP_SCHED
8107 	if (tsk->sched_class->task_change_group)
8108 		tsk->sched_class->task_change_group(tsk, type);
8109 	else
8110 #endif
8111 		set_task_rq(tsk, task_cpu(tsk));
8112 }
8113 
8114 /*
8115  * Change task's runqueue when it moves between groups.
8116  *
8117  * The caller of this function should have put the task in its new group by
8118  * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
8119  * its new group.
8120  */
sched_move_task(struct task_struct * tsk)8121 void sched_move_task(struct task_struct *tsk)
8122 {
8123 	int queued, running, queue_flags =
8124 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
8125 	struct rq_flags rf;
8126 	struct rq *rq;
8127 
8128 	rq = task_rq_lock(tsk, &rf);
8129 	update_rq_clock(rq);
8130 
8131 	running = task_current(rq, tsk);
8132 	queued = task_on_rq_queued(tsk);
8133 
8134 	if (queued)
8135 		dequeue_task(rq, tsk, queue_flags);
8136 	if (running)
8137 		put_prev_task(rq, tsk);
8138 
8139 	sched_change_group(tsk, TASK_MOVE_GROUP);
8140 
8141 	if (queued)
8142 		enqueue_task(rq, tsk, queue_flags);
8143 	if (running) {
8144 		set_next_task(rq, tsk);
8145 		/*
8146 		 * After changing group, the running task may have joined a
8147 		 * throttled one but it's still the running task. Trigger a
8148 		 * resched to make sure that task can still run.
8149 		 */
8150 		resched_curr(rq);
8151 	}
8152 
8153 	task_rq_unlock(rq, tsk, &rf);
8154 }
8155 
css_tg(struct cgroup_subsys_state * css)8156 static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
8157 {
8158 	return css ? container_of(css, struct task_group, css) : NULL;
8159 }
8160 
8161 static struct cgroup_subsys_state *
cpu_cgroup_css_alloc(struct cgroup_subsys_state * parent_css)8162 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
8163 {
8164 	struct task_group *parent = css_tg(parent_css);
8165 	struct task_group *tg;
8166 
8167 	if (!parent) {
8168 		/* This is early initialization for the top cgroup */
8169 		return &root_task_group.css;
8170 	}
8171 
8172 	tg = sched_create_group(parent);
8173 	if (IS_ERR(tg))
8174 		return ERR_PTR(-ENOMEM);
8175 
8176 #ifdef CONFIG_SCHED_RTG_CGROUP
8177 	tg->colocate = false;
8178 	tg->colocate_update_disabled = false;
8179 #endif
8180 
8181 	return &tg->css;
8182 }
8183 
8184 /* Expose task group only after completing cgroup initialization */
cpu_cgroup_css_online(struct cgroup_subsys_state * css)8185 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
8186 {
8187 	struct task_group *tg = css_tg(css);
8188 	struct task_group *parent = css_tg(css->parent);
8189 
8190 	if (parent)
8191 		sched_online_group(tg, parent);
8192 
8193 #ifdef CONFIG_UCLAMP_TASK_GROUP
8194 	/* Propagate the effective uclamp value for the new group */
8195 	mutex_lock(&uclamp_mutex);
8196 	rcu_read_lock();
8197 	cpu_util_update_eff(css);
8198 	rcu_read_unlock();
8199 	mutex_unlock(&uclamp_mutex);
8200 #endif
8201 
8202 	return 0;
8203 }
8204 
cpu_cgroup_css_released(struct cgroup_subsys_state * css)8205 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
8206 {
8207 	struct task_group *tg = css_tg(css);
8208 
8209 	sched_offline_group(tg);
8210 }
8211 
cpu_cgroup_css_free(struct cgroup_subsys_state * css)8212 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
8213 {
8214 	struct task_group *tg = css_tg(css);
8215 
8216 	/*
8217 	 * Relies on the RCU grace period between css_released() and this.
8218 	 */
8219 	sched_free_group(tg);
8220 }
8221 
8222 /*
8223  * This is called before wake_up_new_task(), therefore we really only
8224  * have to set its group bits, all the other stuff does not apply.
8225  */
cpu_cgroup_fork(struct task_struct * task)8226 static void cpu_cgroup_fork(struct task_struct *task)
8227 {
8228 	struct rq_flags rf;
8229 	struct rq *rq;
8230 
8231 	rq = task_rq_lock(task, &rf);
8232 
8233 	update_rq_clock(rq);
8234 	sched_change_group(task, TASK_SET_GROUP);
8235 
8236 	task_rq_unlock(rq, task, &rf);
8237 }
8238 
cpu_cgroup_can_attach(struct cgroup_taskset * tset)8239 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
8240 {
8241 	struct task_struct *task;
8242 	struct cgroup_subsys_state *css;
8243 	int ret = 0;
8244 
8245 	cgroup_taskset_for_each(task, css, tset) {
8246 #ifdef CONFIG_RT_GROUP_SCHED
8247 		if (!sched_rt_can_attach(css_tg(css), task))
8248 			return -EINVAL;
8249 #endif
8250 		/*
8251 		 * Serialize against wake_up_new_task() such that if its
8252 		 * running, we're sure to observe its full state.
8253 		 */
8254 		raw_spin_lock_irq(&task->pi_lock);
8255 		/*
8256 		 * Avoid calling sched_move_task() before wake_up_new_task()
8257 		 * has happened. This would lead to problems with PELT, due to
8258 		 * move wanting to detach+attach while we're not attached yet.
8259 		 */
8260 		if (task->state == TASK_NEW)
8261 			ret = -EINVAL;
8262 		raw_spin_unlock_irq(&task->pi_lock);
8263 
8264 		if (ret)
8265 			break;
8266 	}
8267 	return ret;
8268 }
8269 
8270 #if defined(CONFIG_UCLAMP_TASK_GROUP) && defined(CONFIG_SCHED_RTG_CGROUP)
schedgp_attach(struct cgroup_taskset * tset)8271 static void schedgp_attach(struct cgroup_taskset *tset)
8272 {
8273 	struct task_struct *task;
8274 	struct cgroup_subsys_state *css;
8275 	bool colocate;
8276 	struct task_group *tg;
8277 
8278 	cgroup_taskset_first(tset, &css);
8279 	tg = css_tg(css);
8280 
8281 	colocate = tg->colocate;
8282 
8283 	cgroup_taskset_for_each(task, css, tset)
8284 		sync_cgroup_colocation(task, colocate);
8285 }
8286 #else
schedgp_attach(struct cgroup_taskset * tset)8287 static void schedgp_attach(struct cgroup_taskset *tset) { }
8288 #endif
cpu_cgroup_attach(struct cgroup_taskset * tset)8289 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
8290 {
8291 	struct task_struct *task;
8292 	struct cgroup_subsys_state *css;
8293 
8294 	cgroup_taskset_for_each(task, css, tset)
8295 		sched_move_task(task);
8296 
8297 	schedgp_attach(tset);
8298 }
8299 
8300 #ifdef CONFIG_UCLAMP_TASK_GROUP
cpu_util_update_eff(struct cgroup_subsys_state * css)8301 static void cpu_util_update_eff(struct cgroup_subsys_state *css)
8302 {
8303 	struct cgroup_subsys_state *top_css = css;
8304 	struct uclamp_se *uc_parent = NULL;
8305 	struct uclamp_se *uc_se = NULL;
8306 	unsigned int eff[UCLAMP_CNT];
8307 	enum uclamp_id clamp_id;
8308 	unsigned int clamps;
8309 
8310 	lockdep_assert_held(&uclamp_mutex);
8311 	SCHED_WARN_ON(!rcu_read_lock_held());
8312 
8313 	css_for_each_descendant_pre(css, top_css) {
8314 		uc_parent = css_tg(css)->parent
8315 			? css_tg(css)->parent->uclamp : NULL;
8316 
8317 		for_each_clamp_id(clamp_id) {
8318 			/* Assume effective clamps matches requested clamps */
8319 			eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
8320 			/* Cap effective clamps with parent's effective clamps */
8321 			if (uc_parent &&
8322 			    eff[clamp_id] > uc_parent[clamp_id].value) {
8323 				eff[clamp_id] = uc_parent[clamp_id].value;
8324 			}
8325 		}
8326 		/* Ensure protection is always capped by limit */
8327 		eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
8328 
8329 		/* Propagate most restrictive effective clamps */
8330 		clamps = 0x0;
8331 		uc_se = css_tg(css)->uclamp;
8332 		for_each_clamp_id(clamp_id) {
8333 			if (eff[clamp_id] == uc_se[clamp_id].value)
8334 				continue;
8335 			uc_se[clamp_id].value = eff[clamp_id];
8336 			uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
8337 			clamps |= (0x1 << clamp_id);
8338 		}
8339 		if (!clamps) {
8340 			css = css_rightmost_descendant(css);
8341 			continue;
8342 		}
8343 
8344 		/* Immediately update descendants RUNNABLE tasks */
8345 		uclamp_update_active_tasks(css);
8346 	}
8347 }
8348 
8349 /*
8350  * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
8351  * C expression. Since there is no way to convert a macro argument (N) into a
8352  * character constant, use two levels of macros.
8353  */
8354 #define _POW10(exp) ((unsigned int)1e##exp)
8355 #define POW10(exp) _POW10(exp)
8356 
8357 struct uclamp_request {
8358 #define UCLAMP_PERCENT_SHIFT	2
8359 #define UCLAMP_PERCENT_SCALE	(100 * POW10(UCLAMP_PERCENT_SHIFT))
8360 	s64 percent;
8361 	u64 util;
8362 	int ret;
8363 };
8364 
8365 static inline struct uclamp_request
capacity_from_percent(char * buf)8366 capacity_from_percent(char *buf)
8367 {
8368 	struct uclamp_request req = {
8369 		.percent = UCLAMP_PERCENT_SCALE,
8370 		.util = SCHED_CAPACITY_SCALE,
8371 		.ret = 0,
8372 	};
8373 
8374 	buf = strim(buf);
8375 	if (strcmp(buf, "max")) {
8376 		req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
8377 					     &req.percent);
8378 		if (req.ret)
8379 			return req;
8380 		if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
8381 			req.ret = -ERANGE;
8382 			return req;
8383 		}
8384 
8385 		req.util = req.percent << SCHED_CAPACITY_SHIFT;
8386 		req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
8387 	}
8388 
8389 	return req;
8390 }
8391 
cpu_uclamp_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off,enum uclamp_id clamp_id)8392 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
8393 				size_t nbytes, loff_t off,
8394 				enum uclamp_id clamp_id)
8395 {
8396 	struct uclamp_request req;
8397 	struct task_group *tg;
8398 
8399 	req = capacity_from_percent(buf);
8400 	if (req.ret)
8401 		return req.ret;
8402 
8403 	static_branch_enable(&sched_uclamp_used);
8404 
8405 	mutex_lock(&uclamp_mutex);
8406 	rcu_read_lock();
8407 
8408 	tg = css_tg(of_css(of));
8409 	if (tg->uclamp_req[clamp_id].value != req.util)
8410 		uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
8411 
8412 	/*
8413 	 * Because of not recoverable conversion rounding we keep track of the
8414 	 * exact requested value
8415 	 */
8416 	tg->uclamp_pct[clamp_id] = req.percent;
8417 
8418 	/* Update effective clamps to track the most restrictive value */
8419 	cpu_util_update_eff(of_css(of));
8420 
8421 	rcu_read_unlock();
8422 	mutex_unlock(&uclamp_mutex);
8423 
8424 	return nbytes;
8425 }
8426 
cpu_uclamp_min_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)8427 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
8428 				    char *buf, size_t nbytes,
8429 				    loff_t off)
8430 {
8431 	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
8432 }
8433 
cpu_uclamp_max_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)8434 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
8435 				    char *buf, size_t nbytes,
8436 				    loff_t off)
8437 {
8438 	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
8439 }
8440 
cpu_uclamp_print(struct seq_file * sf,enum uclamp_id clamp_id)8441 static inline void cpu_uclamp_print(struct seq_file *sf,
8442 				    enum uclamp_id clamp_id)
8443 {
8444 	struct task_group *tg;
8445 	u64 util_clamp;
8446 	u64 percent;
8447 	u32 rem;
8448 
8449 	rcu_read_lock();
8450 	tg = css_tg(seq_css(sf));
8451 	util_clamp = tg->uclamp_req[clamp_id].value;
8452 	rcu_read_unlock();
8453 
8454 	if (util_clamp == SCHED_CAPACITY_SCALE) {
8455 		seq_puts(sf, "max\n");
8456 		return;
8457 	}
8458 
8459 	percent = tg->uclamp_pct[clamp_id];
8460 	percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
8461 	seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
8462 }
8463 
cpu_uclamp_min_show(struct seq_file * sf,void * v)8464 static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
8465 {
8466 	cpu_uclamp_print(sf, UCLAMP_MIN);
8467 	return 0;
8468 }
8469 
cpu_uclamp_max_show(struct seq_file * sf,void * v)8470 static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
8471 {
8472 	cpu_uclamp_print(sf, UCLAMP_MAX);
8473 	return 0;
8474 }
8475 
8476 #ifdef CONFIG_SCHED_RTG_CGROUP
sched_colocate_read(struct cgroup_subsys_state * css,struct cftype * cft)8477 static u64 sched_colocate_read(struct cgroup_subsys_state *css,
8478 				struct cftype *cft)
8479 {
8480 	struct task_group *tg = css_tg(css);
8481 
8482 	return (u64) tg->colocate;
8483 }
8484 
sched_colocate_write(struct cgroup_subsys_state * css,struct cftype * cft,u64 colocate)8485 static int sched_colocate_write(struct cgroup_subsys_state *css,
8486 				struct cftype *cft, u64 colocate)
8487 {
8488 	struct task_group *tg = css_tg(css);
8489 
8490 	if (tg->colocate_update_disabled)
8491 		return -EPERM;
8492 
8493 	tg->colocate = !!colocate;
8494 	tg->colocate_update_disabled = true;
8495 
8496 	return 0;
8497 }
8498 #endif /* CONFIG_SCHED_RTG_CGROUP */
8499 #endif /* CONFIG_UCLAMP_TASK_GROUP */
8500 
8501 #ifdef CONFIG_FAIR_GROUP_SCHED
cpu_shares_write_u64(struct cgroup_subsys_state * css,struct cftype * cftype,u64 shareval)8502 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
8503 				struct cftype *cftype, u64 shareval)
8504 {
8505 	if (shareval > scale_load_down(ULONG_MAX))
8506 		shareval = MAX_SHARES;
8507 	return sched_group_set_shares(css_tg(css), scale_load(shareval));
8508 }
8509 
cpu_shares_read_u64(struct cgroup_subsys_state * css,struct cftype * cft)8510 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
8511 			       struct cftype *cft)
8512 {
8513 	struct task_group *tg = css_tg(css);
8514 
8515 	return (u64) scale_load_down(tg->shares);
8516 }
8517 
8518 #ifdef CONFIG_CFS_BANDWIDTH
8519 static DEFINE_MUTEX(cfs_constraints_mutex);
8520 
8521 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
8522 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
8523 /* More than 203 days if BW_SHIFT equals 20. */
8524 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
8525 
8526 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8527 
tg_set_cfs_bandwidth(struct task_group * tg,u64 period,u64 quota)8528 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
8529 {
8530 	int i, ret = 0, runtime_enabled, runtime_was_enabled;
8531 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8532 
8533 	if (tg == &root_task_group)
8534 		return -EINVAL;
8535 
8536 	/*
8537 	 * Ensure we have at some amount of bandwidth every period.  This is
8538 	 * to prevent reaching a state of large arrears when throttled via
8539 	 * entity_tick() resulting in prolonged exit starvation.
8540 	 */
8541 	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
8542 		return -EINVAL;
8543 
8544 	/*
8545 	 * Likewise, bound things on the otherside by preventing insane quota
8546 	 * periods.  This also allows us to normalize in computing quota
8547 	 * feasibility.
8548 	 */
8549 	if (period > max_cfs_quota_period)
8550 		return -EINVAL;
8551 
8552 	/*
8553 	 * Bound quota to defend quota against overflow during bandwidth shift.
8554 	 */
8555 	if (quota != RUNTIME_INF && quota > max_cfs_runtime)
8556 		return -EINVAL;
8557 
8558 	/*
8559 	 * Prevent race between setting of cfs_rq->runtime_enabled and
8560 	 * unthrottle_offline_cfs_rqs().
8561 	 */
8562 	get_online_cpus();
8563 	mutex_lock(&cfs_constraints_mutex);
8564 	ret = __cfs_schedulable(tg, period, quota);
8565 	if (ret)
8566 		goto out_unlock;
8567 
8568 	runtime_enabled = quota != RUNTIME_INF;
8569 	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
8570 	/*
8571 	 * If we need to toggle cfs_bandwidth_used, off->on must occur
8572 	 * before making related changes, and on->off must occur afterwards
8573 	 */
8574 	if (runtime_enabled && !runtime_was_enabled)
8575 		cfs_bandwidth_usage_inc();
8576 	raw_spin_lock_irq(&cfs_b->lock);
8577 	cfs_b->period = ns_to_ktime(period);
8578 	cfs_b->quota = quota;
8579 
8580 	__refill_cfs_bandwidth_runtime(cfs_b);
8581 
8582 	/* Restart the period timer (if active) to handle new period expiry: */
8583 	if (runtime_enabled)
8584 		start_cfs_bandwidth(cfs_b);
8585 
8586 	raw_spin_unlock_irq(&cfs_b->lock);
8587 
8588 	for_each_online_cpu(i) {
8589 		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
8590 		struct rq *rq = cfs_rq->rq;
8591 		struct rq_flags rf;
8592 
8593 		rq_lock_irq(rq, &rf);
8594 		cfs_rq->runtime_enabled = runtime_enabled;
8595 		cfs_rq->runtime_remaining = 0;
8596 
8597 		if (cfs_rq->throttled)
8598 			unthrottle_cfs_rq(cfs_rq);
8599 		rq_unlock_irq(rq, &rf);
8600 	}
8601 	if (runtime_was_enabled && !runtime_enabled)
8602 		cfs_bandwidth_usage_dec();
8603 out_unlock:
8604 	mutex_unlock(&cfs_constraints_mutex);
8605 	put_online_cpus();
8606 
8607 	return ret;
8608 }
8609 
tg_set_cfs_quota(struct task_group * tg,long cfs_quota_us)8610 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
8611 {
8612 	u64 quota, period;
8613 
8614 	period = ktime_to_ns(tg->cfs_bandwidth.period);
8615 	if (cfs_quota_us < 0)
8616 		quota = RUNTIME_INF;
8617 	else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
8618 		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
8619 	else
8620 		return -EINVAL;
8621 
8622 	return tg_set_cfs_bandwidth(tg, period, quota);
8623 }
8624 
tg_get_cfs_quota(struct task_group * tg)8625 static long tg_get_cfs_quota(struct task_group *tg)
8626 {
8627 	u64 quota_us;
8628 
8629 	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
8630 		return -1;
8631 
8632 	quota_us = tg->cfs_bandwidth.quota;
8633 	do_div(quota_us, NSEC_PER_USEC);
8634 
8635 	return quota_us;
8636 }
8637 
tg_set_cfs_period(struct task_group * tg,long cfs_period_us)8638 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
8639 {
8640 	u64 quota, period;
8641 
8642 	if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
8643 		return -EINVAL;
8644 
8645 	period = (u64)cfs_period_us * NSEC_PER_USEC;
8646 	quota = tg->cfs_bandwidth.quota;
8647 
8648 	return tg_set_cfs_bandwidth(tg, period, quota);
8649 }
8650 
tg_get_cfs_period(struct task_group * tg)8651 static long tg_get_cfs_period(struct task_group *tg)
8652 {
8653 	u64 cfs_period_us;
8654 
8655 	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
8656 	do_div(cfs_period_us, NSEC_PER_USEC);
8657 
8658 	return cfs_period_us;
8659 }
8660 
cpu_cfs_quota_read_s64(struct cgroup_subsys_state * css,struct cftype * cft)8661 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
8662 				  struct cftype *cft)
8663 {
8664 	return tg_get_cfs_quota(css_tg(css));
8665 }
8666 
cpu_cfs_quota_write_s64(struct cgroup_subsys_state * css,struct cftype * cftype,s64 cfs_quota_us)8667 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
8668 				   struct cftype *cftype, s64 cfs_quota_us)
8669 {
8670 	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
8671 }
8672 
cpu_cfs_period_read_u64(struct cgroup_subsys_state * css,struct cftype * cft)8673 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
8674 				   struct cftype *cft)
8675 {
8676 	return tg_get_cfs_period(css_tg(css));
8677 }
8678 
cpu_cfs_period_write_u64(struct cgroup_subsys_state * css,struct cftype * cftype,u64 cfs_period_us)8679 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
8680 				    struct cftype *cftype, u64 cfs_period_us)
8681 {
8682 	return tg_set_cfs_period(css_tg(css), cfs_period_us);
8683 }
8684 
8685 struct cfs_schedulable_data {
8686 	struct task_group *tg;
8687 	u64 period, quota;
8688 };
8689 
8690 /*
8691  * normalize group quota/period to be quota/max_period
8692  * note: units are usecs
8693  */
normalize_cfs_quota(struct task_group * tg,struct cfs_schedulable_data * d)8694 static u64 normalize_cfs_quota(struct task_group *tg,
8695 			       struct cfs_schedulable_data *d)
8696 {
8697 	u64 quota, period;
8698 
8699 	if (tg == d->tg) {
8700 		period = d->period;
8701 		quota = d->quota;
8702 	} else {
8703 		period = tg_get_cfs_period(tg);
8704 		quota = tg_get_cfs_quota(tg);
8705 	}
8706 
8707 	/* note: these should typically be equivalent */
8708 	if (quota == RUNTIME_INF || quota == -1)
8709 		return RUNTIME_INF;
8710 
8711 	return to_ratio(period, quota);
8712 }
8713 
tg_cfs_schedulable_down(struct task_group * tg,void * data)8714 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
8715 {
8716 	struct cfs_schedulable_data *d = data;
8717 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8718 	s64 quota = 0, parent_quota = -1;
8719 
8720 	if (!tg->parent) {
8721 		quota = RUNTIME_INF;
8722 	} else {
8723 		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
8724 
8725 		quota = normalize_cfs_quota(tg, d);
8726 		parent_quota = parent_b->hierarchical_quota;
8727 
8728 		/*
8729 		 * Ensure max(child_quota) <= parent_quota.  On cgroup2,
8730 		 * always take the min.  On cgroup1, only inherit when no
8731 		 * limit is set:
8732 		 */
8733 		if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
8734 			quota = min(quota, parent_quota);
8735 		} else {
8736 			if (quota == RUNTIME_INF)
8737 				quota = parent_quota;
8738 			else if (parent_quota != RUNTIME_INF && quota > parent_quota)
8739 				return -EINVAL;
8740 		}
8741 	}
8742 	cfs_b->hierarchical_quota = quota;
8743 
8744 	return 0;
8745 }
8746 
__cfs_schedulable(struct task_group * tg,u64 period,u64 quota)8747 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
8748 {
8749 	int ret;
8750 	struct cfs_schedulable_data data = {
8751 		.tg = tg,
8752 		.period = period,
8753 		.quota = quota,
8754 	};
8755 
8756 	if (quota != RUNTIME_INF) {
8757 		do_div(data.period, NSEC_PER_USEC);
8758 		do_div(data.quota, NSEC_PER_USEC);
8759 	}
8760 
8761 	rcu_read_lock();
8762 	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
8763 	rcu_read_unlock();
8764 
8765 	return ret;
8766 }
8767 
cpu_cfs_stat_show(struct seq_file * sf,void * v)8768 static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
8769 {
8770 	struct task_group *tg = css_tg(seq_css(sf));
8771 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8772 
8773 	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
8774 	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
8775 	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
8776 
8777 	if (schedstat_enabled() && tg != &root_task_group) {
8778 		u64 ws = 0;
8779 		int i;
8780 
8781 		for_each_possible_cpu(i)
8782 			ws += schedstat_val(tg->se[i]->statistics.wait_sum);
8783 
8784 		seq_printf(sf, "wait_sum %llu\n", ws);
8785 	}
8786 
8787 	return 0;
8788 }
8789 #endif /* CONFIG_CFS_BANDWIDTH */
8790 #endif /* CONFIG_FAIR_GROUP_SCHED */
8791 
8792 #ifdef CONFIG_RT_GROUP_SCHED
cpu_rt_runtime_write(struct cgroup_subsys_state * css,struct cftype * cft,s64 val)8793 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
8794 				struct cftype *cft, s64 val)
8795 {
8796 	return sched_group_set_rt_runtime(css_tg(css), val);
8797 }
8798 
cpu_rt_runtime_read(struct cgroup_subsys_state * css,struct cftype * cft)8799 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
8800 			       struct cftype *cft)
8801 {
8802 	return sched_group_rt_runtime(css_tg(css));
8803 }
8804 
cpu_rt_period_write_uint(struct cgroup_subsys_state * css,struct cftype * cftype,u64 rt_period_us)8805 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
8806 				    struct cftype *cftype, u64 rt_period_us)
8807 {
8808 	return sched_group_set_rt_period(css_tg(css), rt_period_us);
8809 }
8810 
cpu_rt_period_read_uint(struct cgroup_subsys_state * css,struct cftype * cft)8811 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
8812 				   struct cftype *cft)
8813 {
8814 	return sched_group_rt_period(css_tg(css));
8815 }
8816 #endif /* CONFIG_RT_GROUP_SCHED */
8817 
8818 static struct cftype cpu_legacy_files[] = {
8819 #ifdef CONFIG_FAIR_GROUP_SCHED
8820 	{
8821 		.name = "shares",
8822 		.read_u64 = cpu_shares_read_u64,
8823 		.write_u64 = cpu_shares_write_u64,
8824 	},
8825 #endif
8826 #ifdef CONFIG_CFS_BANDWIDTH
8827 	{
8828 		.name = "cfs_quota_us",
8829 		.read_s64 = cpu_cfs_quota_read_s64,
8830 		.write_s64 = cpu_cfs_quota_write_s64,
8831 	},
8832 	{
8833 		.name = "cfs_period_us",
8834 		.read_u64 = cpu_cfs_period_read_u64,
8835 		.write_u64 = cpu_cfs_period_write_u64,
8836 	},
8837 	{
8838 		.name = "stat",
8839 		.seq_show = cpu_cfs_stat_show,
8840 	},
8841 #endif
8842 #ifdef CONFIG_RT_GROUP_SCHED
8843 	{
8844 		.name = "rt_runtime_us",
8845 		.read_s64 = cpu_rt_runtime_read,
8846 		.write_s64 = cpu_rt_runtime_write,
8847 	},
8848 	{
8849 		.name = "rt_period_us",
8850 		.read_u64 = cpu_rt_period_read_uint,
8851 		.write_u64 = cpu_rt_period_write_uint,
8852 	},
8853 #endif
8854 #ifdef CONFIG_UCLAMP_TASK_GROUP
8855 	{
8856 		.name = "uclamp.min",
8857 		.flags = CFTYPE_NOT_ON_ROOT,
8858 		.seq_show = cpu_uclamp_min_show,
8859 		.write = cpu_uclamp_min_write,
8860 	},
8861 	{
8862 		.name = "uclamp.max",
8863 		.flags = CFTYPE_NOT_ON_ROOT,
8864 		.seq_show = cpu_uclamp_max_show,
8865 		.write = cpu_uclamp_max_write,
8866 	},
8867 #ifdef CONFIG_SCHED_RTG_CGROUP
8868 	{
8869 		.name = "uclamp.colocate",
8870 		.flags = CFTYPE_NOT_ON_ROOT,
8871 		.read_u64 = sched_colocate_read,
8872 		.write_u64 = sched_colocate_write,
8873 	},
8874 #endif
8875 #endif
8876 	{ }	/* Terminate */
8877 };
8878 
cpu_extra_stat_show(struct seq_file * sf,struct cgroup_subsys_state * css)8879 static int cpu_extra_stat_show(struct seq_file *sf,
8880 			       struct cgroup_subsys_state *css)
8881 {
8882 #ifdef CONFIG_CFS_BANDWIDTH
8883 	{
8884 		struct task_group *tg = css_tg(css);
8885 		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8886 		u64 throttled_usec;
8887 
8888 		throttled_usec = cfs_b->throttled_time;
8889 		do_div(throttled_usec, NSEC_PER_USEC);
8890 
8891 		seq_printf(sf, "nr_periods %d\n"
8892 			   "nr_throttled %d\n"
8893 			   "throttled_usec %llu\n",
8894 			   cfs_b->nr_periods, cfs_b->nr_throttled,
8895 			   throttled_usec);
8896 	}
8897 #endif
8898 	return 0;
8899 }
8900 
8901 #ifdef CONFIG_FAIR_GROUP_SCHED
cpu_weight_read_u64(struct cgroup_subsys_state * css,struct cftype * cft)8902 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
8903 			       struct cftype *cft)
8904 {
8905 	struct task_group *tg = css_tg(css);
8906 	u64 weight = scale_load_down(tg->shares);
8907 
8908 	return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024);
8909 }
8910 
cpu_weight_write_u64(struct cgroup_subsys_state * css,struct cftype * cft,u64 weight)8911 static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
8912 				struct cftype *cft, u64 weight)
8913 {
8914 	/*
8915 	 * cgroup weight knobs should use the common MIN, DFL and MAX
8916 	 * values which are 1, 100 and 10000 respectively.  While it loses
8917 	 * a bit of range on both ends, it maps pretty well onto the shares
8918 	 * value used by scheduler and the round-trip conversions preserve
8919 	 * the original value over the entire range.
8920 	 */
8921 	if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
8922 		return -ERANGE;
8923 
8924 	weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);
8925 
8926 	return sched_group_set_shares(css_tg(css), scale_load(weight));
8927 }
8928 
cpu_weight_nice_read_s64(struct cgroup_subsys_state * css,struct cftype * cft)8929 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
8930 				    struct cftype *cft)
8931 {
8932 	unsigned long weight = scale_load_down(css_tg(css)->shares);
8933 	int last_delta = INT_MAX;
8934 	int prio, delta;
8935 
8936 	/* find the closest nice value to the current weight */
8937 	for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
8938 		delta = abs(sched_prio_to_weight[prio] - weight);
8939 		if (delta >= last_delta)
8940 			break;
8941 		last_delta = delta;
8942 	}
8943 
8944 	return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
8945 }
8946 
cpu_weight_nice_write_s64(struct cgroup_subsys_state * css,struct cftype * cft,s64 nice)8947 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
8948 				     struct cftype *cft, s64 nice)
8949 {
8950 	unsigned long weight;
8951 	int idx;
8952 
8953 	if (nice < MIN_NICE || nice > MAX_NICE)
8954 		return -ERANGE;
8955 
8956 	idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
8957 	idx = array_index_nospec(idx, 40);
8958 	weight = sched_prio_to_weight[idx];
8959 
8960 	return sched_group_set_shares(css_tg(css), scale_load(weight));
8961 }
8962 #endif
8963 
cpu_period_quota_print(struct seq_file * sf,long period,long quota)8964 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
8965 						  long period, long quota)
8966 {
8967 	if (quota < 0)
8968 		seq_puts(sf, "max");
8969 	else
8970 		seq_printf(sf, "%ld", quota);
8971 
8972 	seq_printf(sf, " %ld\n", period);
8973 }
8974 
8975 /* caller should put the current value in *@periodp before calling */
cpu_period_quota_parse(char * buf,u64 * periodp,u64 * quotap)8976 static int __maybe_unused cpu_period_quota_parse(char *buf,
8977 						 u64 *periodp, u64 *quotap)
8978 {
8979 	char tok[21];	/* U64_MAX */
8980 
8981 	if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
8982 		return -EINVAL;
8983 
8984 	*periodp *= NSEC_PER_USEC;
8985 
8986 	if (sscanf(tok, "%llu", quotap))
8987 		*quotap *= NSEC_PER_USEC;
8988 	else if (!strcmp(tok, "max"))
8989 		*quotap = RUNTIME_INF;
8990 	else
8991 		return -EINVAL;
8992 
8993 	return 0;
8994 }
8995 
8996 #ifdef CONFIG_CFS_BANDWIDTH
cpu_max_show(struct seq_file * sf,void * v)8997 static int cpu_max_show(struct seq_file *sf, void *v)
8998 {
8999 	struct task_group *tg = css_tg(seq_css(sf));
9000 
9001 	cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
9002 	return 0;
9003 }
9004 
cpu_max_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)9005 static ssize_t cpu_max_write(struct kernfs_open_file *of,
9006 			     char *buf, size_t nbytes, loff_t off)
9007 {
9008 	struct task_group *tg = css_tg(of_css(of));
9009 	u64 period = tg_get_cfs_period(tg);
9010 	u64 quota;
9011 	int ret;
9012 
9013 	ret = cpu_period_quota_parse(buf, &period, &quota);
9014 	if (!ret)
9015 		ret = tg_set_cfs_bandwidth(tg, period, quota);
9016 	return ret ?: nbytes;
9017 }
9018 #endif
9019 
9020 static struct cftype cpu_files[] = {
9021 #ifdef CONFIG_FAIR_GROUP_SCHED
9022 	{
9023 		.name = "weight",
9024 		.flags = CFTYPE_NOT_ON_ROOT,
9025 		.read_u64 = cpu_weight_read_u64,
9026 		.write_u64 = cpu_weight_write_u64,
9027 	},
9028 	{
9029 		.name = "weight.nice",
9030 		.flags = CFTYPE_NOT_ON_ROOT,
9031 		.read_s64 = cpu_weight_nice_read_s64,
9032 		.write_s64 = cpu_weight_nice_write_s64,
9033 	},
9034 #endif
9035 #ifdef CONFIG_CFS_BANDWIDTH
9036 	{
9037 		.name = "max",
9038 		.flags = CFTYPE_NOT_ON_ROOT,
9039 		.seq_show = cpu_max_show,
9040 		.write = cpu_max_write,
9041 	},
9042 #endif
9043 #ifdef CONFIG_UCLAMP_TASK_GROUP
9044 	{
9045 		.name = "uclamp.min",
9046 		.flags = CFTYPE_NOT_ON_ROOT,
9047 		.seq_show = cpu_uclamp_min_show,
9048 		.write = cpu_uclamp_min_write,
9049 	},
9050 	{
9051 		.name = "uclamp.max",
9052 		.flags = CFTYPE_NOT_ON_ROOT,
9053 		.seq_show = cpu_uclamp_max_show,
9054 		.write = cpu_uclamp_max_write,
9055 	},
9056 #endif
9057 	{ }	/* terminate */
9058 };
9059 
9060 struct cgroup_subsys cpu_cgrp_subsys = {
9061 	.css_alloc	= cpu_cgroup_css_alloc,
9062 	.css_online	= cpu_cgroup_css_online,
9063 	.css_released	= cpu_cgroup_css_released,
9064 	.css_free	= cpu_cgroup_css_free,
9065 	.css_extra_stat_show = cpu_extra_stat_show,
9066 	.fork		= cpu_cgroup_fork,
9067 	.can_attach	= cpu_cgroup_can_attach,
9068 	.attach		= cpu_cgroup_attach,
9069 	.legacy_cftypes	= cpu_legacy_files,
9070 	.dfl_cftypes	= cpu_files,
9071 	.early_init	= true,
9072 	.threaded	= true,
9073 };
9074 
9075 #endif	/* CONFIG_CGROUP_SCHED */
9076 
dump_cpu_task(int cpu)9077 void dump_cpu_task(int cpu)
9078 {
9079 	pr_info("Task dump for CPU %d:\n", cpu);
9080 	sched_show_task(cpu_curr(cpu));
9081 }
9082 
9083 /*
9084  * Nice levels are multiplicative, with a gentle 10% change for every
9085  * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
9086  * nice 1, it will get ~10% less CPU time than another CPU-bound task
9087  * that remained on nice 0.
9088  *
9089  * The "10% effect" is relative and cumulative: from _any_ nice level,
9090  * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
9091  * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
9092  * If a task goes up by ~10% and another task goes down by ~10% then
9093  * the relative distance between them is ~25%.)
9094  */
9095 const int sched_prio_to_weight[40] = {
9096  /* -20 */     88761,     71755,     56483,     46273,     36291,
9097  /* -15 */     29154,     23254,     18705,     14949,     11916,
9098  /* -10 */      9548,      7620,      6100,      4904,      3906,
9099  /*  -5 */      3121,      2501,      1991,      1586,      1277,
9100  /*   0 */      1024,       820,       655,       526,       423,
9101  /*   5 */       335,       272,       215,       172,       137,
9102  /*  10 */       110,        87,        70,        56,        45,
9103  /*  15 */        36,        29,        23,        18,        15,
9104 };
9105 
9106 /*
9107  * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
9108  *
9109  * In cases where the weight does not change often, we can use the
9110  * precalculated inverse to speed up arithmetics by turning divisions
9111  * into multiplications:
9112  */
9113 const u32 sched_prio_to_wmult[40] = {
9114  /* -20 */     48388,     59856,     76040,     92818,    118348,
9115  /* -15 */    147320,    184698,    229616,    287308,    360437,
9116  /* -10 */    449829,    563644,    704093,    875809,   1099582,
9117  /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
9118  /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
9119  /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
9120  /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
9121  /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
9122 };
9123 
9124 #ifdef CONFIG_SCHED_LATENCY_NICE
9125 /*
9126  * latency weight for wakeup preemption
9127  */
9128 const int sched_latency_to_weight[40] = {
9129  /* -20 */      1024,       973,       922,       870,       819,
9130  /* -15 */       768,       717,       666,       614,       563,
9131  /* -10 */       512,       461,       410,       358,       307,
9132  /*  -5 */       256,       205,       154,       102,       51,
9133  /*   0 */	   0,       -51,      -102,      -154,      -205,
9134  /*   5 */      -256,      -307,      -358,      -410,      -461,
9135  /*  10 */      -512,      -563,      -614,      -666,      -717,
9136  /*  15 */      -768,      -819,      -870,      -922,      -973,
9137 };
9138 #endif
9139 
call_trace_sched_update_nr_running(struct rq * rq,int count)9140 void call_trace_sched_update_nr_running(struct rq *rq, int count)
9141 {
9142         trace_sched_update_nr_running_tp(rq, count);
9143 }
9144 
9145 #ifdef CONFIG_SCHED_WALT
9146 /*
9147  * sched_exit() - Set EXITING_TASK_MARKER in task's ravg.demand field
9148  *
9149  * Stop accounting (exiting) task's future cpu usage
9150  *
9151  * We need this so that reset_all_windows_stats() can function correctly.
9152  * reset_all_window_stats() depends on do_each_thread/for_each_thread task
9153  * iterators to reset *all* task's statistics. Exiting tasks however become
9154  * invisible to those iterators. sched_exit() is called on a exiting task prior
9155  * to being removed from task_list, which will let reset_all_window_stats()
9156  * function correctly.
9157  */
sched_exit(struct task_struct * p)9158 void sched_exit(struct task_struct *p)
9159 {
9160 	struct rq_flags rf;
9161 	struct rq *rq;
9162 	u64 wallclock;
9163 
9164 #ifdef CONFIG_SCHED_RTG
9165 	sched_set_group_id(p, 0);
9166 #endif
9167 
9168 	rq = task_rq_lock(p, &rf);
9169 
9170 	/* rq->curr == p */
9171 	wallclock = sched_ktime_clock();
9172 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
9173 	dequeue_task(rq, p, 0);
9174 	/*
9175 	 * task's contribution is already removed from the
9176 	 * cumulative window demand in dequeue. As the
9177 	 * task's stats are reset, the next enqueue does
9178 	 * not change the cumulative window demand.
9179 	 */
9180 	reset_task_stats(p);
9181 	p->ravg.mark_start = wallclock;
9182 	p->ravg.sum_history[0] = EXITING_TASK_MARKER;
9183 
9184 	enqueue_task(rq, p, 0);
9185 	task_rq_unlock(rq, p, &rf);
9186 	free_task_load_ptrs(p);
9187 }
9188 #endif /* CONFIG_SCHED_WALT */
9189