// SPDX-License-Identifier: GPL-2.0
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
#include "sched.h"

#include "pelt.h"

#include <trace/hooks/sched.h>

int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
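/*
 * Worked example (assuming the usual definition of RR_TIMESLICE as
 * 100 msecs worth of jiffies, i.e. (100 * HZ / 1000)): with HZ=250,
 * RR_TIMESLICE is 25 jiffies and the sysctl computes to
 * (1000 * 25) / 250 = 100, so the value userspace sees is in
 * milliseconds regardless of HZ.
 */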
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	int idle = 0;
	int overrun;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
		if (!overrun)
			break;

		raw_spin_unlock(&rt_b->rt_runtime_lock);
		idle = do_sched_rt_period_timer(rt_b, overrun);
		raw_spin_lock(&rt_b->rt_runtime_lock);
	}
	if (idle)
		rt_b->rt_period_active = 0;
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_HARD);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	raw_spin_lock(&rt_b->rt_runtime_lock);
	if (!rt_b->rt_period_active) {
		rt_b->rt_period_active = 1;
		/*
		 * SCHED_DEADLINE updates the bandwidth, as a runaway
		 * RT task alongside a DL task could hog a CPU. But DL does
		 * not reset the period. If a deadline task was running
		 * without an RT task running, it can cause RT tasks to
		 * throttle when they start up. Kick the timer right away
		 * to update the period.
		 */
		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
		hrtimer_start_expires(&rt_b->rt_period_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	do_start_rt_bandwidth(rt_b);
}

void init_rt_rq(struct rt_rq *rt_rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
	rt_rq->highest_prio.next = MAX_RT_PRIO-1;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
	/* We start in dequeued state, because no RT tasks are queued */
	rt_rq->rt_queued = 0;

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_se->rt_rq;

	return rt_rq->rq;
}

void unregister_rt_sched_group(struct task_group *tg)
{
	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);
}

void free_rt_sched_group(struct task_group *tg)
{
	int i;

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq);
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);

	return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	return &rq->rt;
}

void unregister_rt_sched_group(struct task_group *tg) { }

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static void pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	return rq->online && rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 *
	 * Matched by the barrier in pull_rt_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

static inline void rt_queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void rt_queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else {
		rq->rt.highest_prio.next = MAX_RT_PRIO-1;
	}
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_rt_task(struct rq *this_rq)
{
}

static inline void rt_queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->on_rq;
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
 * settings.
 *
 * This check is only important for heterogeneous systems where the uclamp_min
 * value is higher than the capacity of a @cpu. For non-heterogeneous systems
 * this function will always return true.
 *
 * The function will return true if the capacity of the @cpu is >= the
 * uclamp_min and false otherwise.
 *
 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
 * > uclamp_max.
 */
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	unsigned int min_cap;
	unsigned int max_cap;
	unsigned int cpu_cap;

	/* Only heterogeneous systems can benefit from this check */
	if (!sched_asym_cpucap_active())
		return true;

	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
	max_cap = uclamp_eff_value(p, UCLAMP_MAX);

	cpu_cap = capacity_orig_of(cpu);

	return cpu_cap >= min(min_cap, max_cap);
}
#else
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	return true;
}
#endif
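
/*
 * Illustrative numbers (not taken from this file): on an asymmetric
 * system, a task with an effective uclamp_min of 512 does not fit a
 * little CPU whose original capacity is 380, so the check above fails
 * and the wakeup path will look for a bigger CPU instead.
 */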

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
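
/*
 * Note: the loop above starts from the &task_groups list head itself, so
 * the first next_task_group() call yields the first real group; autogroups
 * are skipped, and each iteration hands back the rt_rq the group owns on
 * cpu_of(rq).
 */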

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct rq *rq = rq_of_rt_rq(rt_rq);
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq);

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (!rt_se)
			enqueue_top_rt_rq(rt_rq);
		else if (!on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, 0);

		if (rt_rq->highest_prio.curr < curr->prio)
			resched_curr(rq);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (!rt_se) {
		dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
	} else if (on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se, 0);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (!rt_rq->rt_nr_running)
		return;

	enqueue_top_rt_rq(rt_rq);
	resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
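/*
 * Example with illustrative numbers: with a 1s period and 4 CPUs in the
 * root domain, a starved rt_rq may take up to
 * (iter->rt_runtime - iter->rt_time) / 4 from each peer, and it stops
 * borrowing as soon as its own rt_runtime reaches the full period.
 */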
static void do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have; that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);

		/* Make rt_rq available for pick_next_task() */
		sched_rt_rq_enqueue(rt_rq);
	}
}

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void balance_runtime(struct rt_rq *rt_rq)
{
	if (!sched_feat(RT_RUNTIME_SHARE))
		return;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}
}
#else /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq) {}
#endif /* CONFIG_SMP */

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1, throttled = 0;
	const struct cpumask *span;

	span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * FIXME: isolated CPUs should really leave the root task group,
	 * whether they are isolcpus or were isolated via cpusets, lest
	 * the timer run on a CPU which does not service all runqueues,
	 * potentially leaving other CPUs indefinitely throttled.  If
	 * isolation is really required, the user will turn the throttle
	 * off to kill the perturbations it causes anyway.  Meanwhile,
	 * this maintains functionality for boot and/or troubleshooting.
	 */
	if (rt_b == &root_task_group.rt_bandwidth)
		span = cpu_online_mask;
#endif
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);
		struct rq_flags rf;
		int skip;

		/*
		 * When span == cpu_online_mask, taking each rq->lock
		 * can be time-consuming. Try to avoid it when possible.
		 */
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
			rt_rq->rt_runtime = rt_b->rt_runtime;
		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		if (skip)
			continue;

		rq_lock(rq, &rf);
		update_rq_clock(rq);

		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * When we're idle and a woken (rt) task is
				 * throttled, check_preempt_curr() will set
				 * skip_update and the time between the wakeup
				 * and this unthrottle will get accounted as
				 * 'runtime'.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq_clock_cancel_skipupdate(rq);
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}
		if (rt_rq->rt_throttled)
			throttled = 1;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		rq_unlock(rq, &rf);
	}

	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
		return 1;

	return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (runtime >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
			rt_rq->rt_throttled = 1;
			printk_deferred_once("sched: RT throttling activated\n");

			trace_android_vh_dump_throttled_rt_tasks(
				raw_smp_processor_id(),
				rq_clock(rq_of_rt_rq(rt_rq)),
				sched_rt_period(rt_rq),
				runtime,
				hrtimer_get_expires_ns(&rt_b->rt_period_timer));
		} else {
			/*
			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
			rt_rq->rt_time = 0;
		}

		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}
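
/*
 * Example with the usual defaults (sched_rt_runtime_us=950000,
 * sched_rt_period_us=1000000): once an rt_rq has accumulated 950ms of
 * rt_time within the current period it is throttled and dequeued above,
 * leaving the remaining 50ms of each second to non-RT tasks until
 * do_sched_rt_period_timer() clears the throttle.
 */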

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	u64 delta_exec;
	u64 now;

	if (curr->sched_class != &rt_sched_class)
		return;

	now = rq_clock_task(rq);
	delta_exec = now - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	schedstat_set(curr->stats.exec_max,
		      max(curr->stats.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = now;
	cgroup_account_cputime(curr, delta_exec);

	trace_android_vh_sched_stat_runtime_rt(curr, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
		int exceeded;

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			exceeded = sched_rt_runtime_exceeded(rt_rq);
			if (exceeded)
				resched_curr(rq);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
			if (exceeded)
				do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
		}
	}
}

static void
dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (!rt_rq->rt_queued)
		return;

	BUG_ON(!rq->nr_running);

	sub_nr_running(rq, count);
	rt_rq->rt_queued = 0;
}

static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (rt_rq->rt_queued)
		return;

	if (rt_rq_throttled(rt_rq))
		return;

	if (rt_rq->rt_nr_running) {
		add_nr_running(rq, rt_rq->rt_nr_running);
		rt_rq->rt_queued = 1;
	}

	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq, 0);
}
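
/*
 * Note: rt_rq->rt_queued acts as a latch in the two helpers above, so the
 * top-level rt_rq contributes to the rq-wide nr_running exactly once no
 * matter how many entities are queued beneath it.
 */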

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else {
		rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
	}

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
	struct rt_rq *group_rq = group_rt_rq(rt_se);

	if (group_rq)
		return group_rq->rt_nr_running;
	else
		return 1;
}

static inline
unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
{
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct task_struct *tsk;

	if (group_rq)
		return group_rq->rr_nr_running;

	tsk = rt_task_of(rt_se);

	return (tsk->policy == SCHED_RR) ? 1 : 0;
}

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}

/*
 * Change rt_se->run_list location unless SAVE && !MOVE
 *
 * assumes ENQUEUE/DEQUEUE flags match
 */
static inline bool move_entity(unsigned int flags)
{
	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
		return false;

	return true;
}
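
/*
 * The flags tested above come from the core scheduler: a plain
 * enqueue/dequeue and SAVE|MOVE both relink the entity, while SAVE
 * without MOVE (presumably a dequeue/enqueue pair around a change that
 * leaves the effective priority intact) keeps its run-list position.
 */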

static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
{
	list_del_init(&rt_se->run_list);

	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	rt_se->on_list = 0;
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
		if (rt_se->on_list)
			__delist_rt_entity(rt_se, array);
		return;
	}

	if (move_entity(flags)) {
		WARN_ON_ONCE(rt_se->on_list);
		if (flags & ENQUEUE_HEAD)
			list_add(&rt_se->run_list, queue);
		else
			list_add_tail(&rt_se->run_list, queue);

		__set_bit(rt_se_prio(rt_se), array->bitmap);
		rt_se->on_list = 1;
	}
	rt_se->on_rq = 1;

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	if (move_entity(flags)) {
		WARN_ON_ONCE(!rt_se->on_list);
		__delist_rt_entity(rt_se, array);
	}
	rt_se->on_rq = 0;

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct sched_rt_entity *back = NULL;
	unsigned int rt_nr_running;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	rt_nr_running = rt_rq_of_se(back)->rt_nr_running;

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se, flags);
	}

	dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	dequeue_rt_stack(rt_se, flags);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, flags);
	enqueue_top_rt_rq(&rq->rt);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	dequeue_rt_stack(rt_se, flags);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, flags);
	}
	enqueue_top_rt_rq(&rq->rt);
}
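
/*
 * Note: both entity operations above first flush the whole hierarchy
 * top-down via dequeue_rt_stack() and then rebuild it from the task
 * entity upwards, so every level's rt_nr_running and priority
 * accounting is recomputed consistently.
 */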

#ifdef CONFIG_SMP
static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
					bool sync)
{
	/*
	 * If the waker is CFS, then an RT sync wakeup would preempt the waker
	 * and force it to run for a likely small time after the RT wakee is
	 * done. So, only honor RT sync wakeups from RT wakers.
	 */
	return sync && task_has_rt_policy(rq->curr) &&
		p->prio <= rq->rt.highest_prio.next &&
		rq->rt.rt_nr_running <= 2;
}
#else
static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
					bool sync)
{
	return false;
}
#endif

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;
	bool sync = !!(flags & ENQUEUE_WAKEUP_SYNC);

	if (flags & ENQUEUE_WAKEUP)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1 &&
	    !should_honor_rt_sync(rq, p, sync))
		enqueue_pushable_task(rq, p);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se, flags);

	dequeue_pushable_task(rq, p);
}

/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}
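
/*
 * sched_yield() for RT thus amounts to moving the task to the tail of
 * its own priority queue: it only yields the CPU to tasks of the same
 * priority, never to lower-priority ones.
 */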

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
/*
 * Return whether the task on the given cpu is currently non-preemptible
 * while handling a potentially long softint, or if the task is likely
 * to block preemptions soon because it is a ksoftirq thread that is
 * handling slow softints.
 */
bool
task_may_not_preempt(struct task_struct *task, int cpu)
{
	__u32 softirqs = per_cpu(active_softirqs, cpu) |
			local_softirq_pending();
	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);

	return ((softirqs & LONG_SOFTIRQ_MASK) &&
		(task == cpu_ksoftirqd ||
		 task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
}
EXPORT_SYMBOL_GPL(task_may_not_preempt);
#endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */

static int
select_task_rq_rt(struct task_struct *p, int cpu, int flags)
{
	struct task_struct *curr;
	struct rq *rq;
	struct rq *this_cpu_rq;
	bool test;
	int target_cpu = -1;
	bool may_not_preempt;
	bool sync = !!(flags & WF_SYNC);
	int this_cpu;

	trace_android_rvh_select_task_rq_rt(p, cpu, flags & 0xF,
					flags, &target_cpu);
	if (target_cpu >= 0)
		return target_cpu;

	/* For anything but wake ups, just return the task_cpu */
	if (!(flags & (WF_TTWU | WF_FORK)))
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = READ_ONCE(rq->curr); /* unlocked access */
	this_cpu = smp_processor_id();
	this_cpu_rq = cpu_rq(this_cpu);

	/*
	 * If the current task on @p's runqueue is a softirq task,
	 * it may run without preemption for a time that is
	 * ill-suited for a waiting RT task. Therefore, try to
	 * wake this RT task on another runqueue.
	 *
	 * Also, if the current task on @p's runqueue is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. If the woken
	 * task is a higher priority, then it will stay on this CPU
	 * and the lower prio task should be moved to another CPU.
	 * Even though this will probably make the lower prio task
	 * lose its cache, we do not want to bounce a higher task
	 * around just because it gave up its CPU, perhaps for a
	 * lock?
	 *
	 * For equal prio tasks, we just let the scheduler sort it out.
	 *
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 *
	 * This test is optimistic, if we get it wrong the load-balancer
	 * will have to sort it out.
	 *
	 * We take into account the capacity of the CPU to ensure it fits the
	 * requirement of the task - which is only important on heterogeneous
	 * systems like big.LITTLE.
	 */
	may_not_preempt = task_may_not_preempt(curr, cpu);
	test = (curr && (may_not_preempt ||
			 (unlikely(rt_task(curr)) &&
			  (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio))));

	/*
	 * Respect the sync flag as long as the task can run on this CPU.
	 */
	if (should_honor_rt_sync(this_cpu_rq, p, sync) &&
	    cpumask_test_cpu(this_cpu, p->cpus_ptr)) {
		cpu = this_cpu;
		goto out_unlock;
	}

	if (test || !rt_task_fits_capacity(p, cpu)) {
		int target = find_lowest_rq(p);

		/*
		 * Bail out if we were forcing a migration to find a better
		 * fitting CPU but our search failed.
		 */
		if (!test && target != -1 && !rt_task_fits_capacity(p, target))
			goto out_unlock;

		/*
		 * If cpu is non-preemptible, prefer remote cpu
		 * even if it's running a higher-prio task.
		 * Otherwise: Don't bother moving it if the destination CPU is
		 * not running a lower priority task.
		 */
		if (target != -1 &&
		    (may_not_preempt ||
		     p->prio < cpu_rq(target)->rt.highest_prio.curr))
			cpu = target;
	}

out_unlock:
	rcu_read_unlock();

out:
	return cpu;
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	/*
	 * There appear to be other CPUs that can accept
	 * the current task but none can run 'p', so let's reschedule
	 * to try and push the current task away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_curr(rq);
}

static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
		int done = 0;

		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it and we've
		 * not yet started the picking loop.
		 */
		rq_unpin_lock(rq, rf);
		trace_android_rvh_sched_balance_rt(rq, p, &done);
		if (!done)
			pull_rt_task(rq);
		rq_repin_lock(rq, rf);
	}

	return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
}
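
/*
 * Note: the return value above reports whether anything at or above the
 * RT class (stop, deadline or RT) is runnable on this rq, which the core
 * scheduler uses to decide whether lower classes still need balancing.
 */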
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->prio < rq->curr->prio) {
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu.  If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
		check_preempt_equal_prio(rq, p);
#endif
}

static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
{
	p->se.exec_start = rq_clock_task(rq);

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);

	if (!first)
		return;

	/*
	 * If the prev task was rt, put_prev_task() has already updated the
	 * utilization. We only care about the case where we start to
	 * schedule an rt task.
	 */
	if (rq->curr->sched_class != &rt_sched_class)
		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
	trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 0);

	rt_queue_push_tasks(rq);
}

static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	if (SCHED_WARN_ON(list_empty(queue)))
		return NULL;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct rt_rq *rt_rq  = &rq->rt;

	do {
		rt_se = pick_next_rt_entity(rt_rq);
		if (unlikely(!rt_se))
			return NULL;
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	return rt_task_of(rt_se);
}
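
/*
 * Note: with CONFIG_RT_GROUP_SCHED the do/while loop above descends the
 * group hierarchy: whenever the picked entity is itself a group
 * (group_rt_rq() returns non-NULL) the search repeats inside that
 * group's rt_rq, until a task entity is found.
 */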

static struct task_struct *pick_task_rt(struct rq *rq)
{
	struct task_struct *p;

	if (!sched_rt_runnable(rq))
		return NULL;

	p = _pick_next_task_rt(rq);

	return p;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct task_struct *p = pick_task_rt(rq);

	if (p)
		set_next_task_rt(rq, p, true);

	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);

	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
	trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1);

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, &p->cpus_mask))
		return 1;

	return 0;
}

/*
 * Return the highest-priority pushable task on @rq that can run on @cpu,
 * or NULL if there is none.
 */
pick_highest_pushable_task(struct rq * rq,int cpu)1795 struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1796 {
1797 	struct plist_head *head = &rq->rt.pushable_tasks;
1798 	struct task_struct *p;
1799 
1800 	if (!has_pushable_tasks(rq))
1801 		return NULL;
1802 
1803 	plist_for_each_entry(p, head, pushable_tasks) {
1804 		if (pick_rt_task(rq, p, cpu))
1805 			return p;
1806 	}
1807 
1808 	return NULL;
1809 }
1810 EXPORT_SYMBOL_GPL(pick_highest_pushable_task);
1811 
1812 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1813 
1814 static int find_lowest_rq(struct task_struct *task)
1815 {
1816 	struct sched_domain *sd;
1817 	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1818 	int this_cpu = smp_processor_id();
1819 	int cpu      = -1;
1820 	int ret;
1821 
1822 	/* Make sure the mask is initialized first */
1823 	if (unlikely(!lowest_mask))
1824 		return -1;
1825 
1826 	if (task->nr_cpus_allowed == 1)
1827 		return -1; /* No other targets possible */
1828 
1829 	/*
1830 	 * If we're on an asymmetric-capacity system, ensure we consider the
1831 	 * different capacities of the CPUs when searching for the lowest_mask.
1832 	 */
1833 	if (sched_asym_cpucap_active()) {
1834 
1835 		ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
1836 					  task, lowest_mask,
1837 					  rt_task_fits_capacity);
1838 	} else {
1839 
1840 		ret = cpupri_find(&task_rq(task)->rd->cpupri,
1841 				  task, lowest_mask);
1842 	}
1843 
1844 	trace_android_rvh_find_lowest_rq(task, lowest_mask, ret, &cpu);
1845 	if (cpu >= 0)
1846 		return cpu;
1847 
1848 	if (!ret)
1849 		return -1; /* No targets found */
1850 
1851 	cpu = task_cpu(task);
1852 
1853 	/*
1854 	 * At this point we have built a mask of CPUs representing the
1855 	 * lowest priority tasks in the system.  Now we want to elect
1856 	 * the best one based on our affinity and topology.
1857 	 *
1858 	 * We prioritize the last CPU that the task executed on since
1859 	 * it is most likely cache-hot in that location.
1860 	 */
1861 	if (cpumask_test_cpu(cpu, lowest_mask))
1862 		return cpu;
1863 
1864 	/*
1865 	 * Otherwise, we consult the sched_domains span maps to figure
1866 	 * out which CPU is logically closest to our hot cache data.
1867 	 */
1868 	if (!cpumask_test_cpu(this_cpu, lowest_mask))
1869 		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1870 
1871 	rcu_read_lock();
1872 	for_each_domain(cpu, sd) {
1873 		if (sd->flags & SD_WAKE_AFFINE) {
1874 			int best_cpu;
1875 
1876 			/*
1877 			 * "this_cpu" is cheaper to preempt than a
1878 			 * remote processor.
1879 			 */
1880 			if (this_cpu != -1 &&
1881 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1882 				rcu_read_unlock();
1883 				return this_cpu;
1884 			}
1885 
1886 			best_cpu = cpumask_any_and_distribute(lowest_mask,
1887 							      sched_domain_span(sd));
1888 			if (best_cpu < nr_cpu_ids) {
1889 				rcu_read_unlock();
1890 				return best_cpu;
1891 			}
1892 		}
1893 	}
1894 	rcu_read_unlock();
1895 
1896 	/*
1897 	 * And finally, if there were no matches within the domains
1898 	 * just give the caller *something* to work with from the compatible
1899 	 * locations.
1900 	 */
1901 	if (this_cpu != -1)
1902 		return this_cpu;
1903 
1904 	cpu = cpumask_any_distribute(lowest_mask);
1905 	if (cpu < nr_cpu_ids)
1906 		return cpu;
1907 
1908 	return -1;
1909 }
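
/*
 * The fallback order above is: the task's last CPU if it is in the
 * lowest_mask (most likely cache-hot), then this_cpu if it shares a
 * wake-affine domain, then any CPU in that domain's span, then any
 * compatible CPU at all. A minimal userspace sketch, assuming one-word
 * cpumasks and a single hypothetical "affine span" mask in place of the
 * full sched-domain walk:
 */
#if 0	/* illustrative sketch, not part of the build */
#include <stdint.h>
#include <stdio.h>

static int mask_test(uint64_t m, int cpu)	{ return (m >> cpu) & 1; }
static int mask_first(uint64_t m)		{ return m ? __builtin_ctzll(m) : -1; }

static int pick_lowest_cpu(uint64_t lowest, int last_cpu, int this_cpu,
			   uint64_t affine_span)
{
	int cpu;

	if (mask_test(lowest, last_cpu))
		return last_cpu;		/* most likely cache-hot */
	if (mask_test(lowest, this_cpu) && mask_test(affine_span, this_cpu))
		return this_cpu;		/* cheapest to preempt */
	cpu = mask_first(lowest & affine_span);
	if (cpu >= 0)
		return cpu;			/* topologically close */
	return mask_first(lowest);		/* *something* compatible */
}

int main(void)
{
	uint64_t lowest = (1ULL << 2) | (1ULL << 5);
	uint64_t span   = (1ULL << 2) | (1ULL << 3);

	/* Last CPU 1 is not in the mask; this_cpu 2 is in mask and span. */
	printf("cpu=%d\n", pick_lowest_cpu(lowest, 1, 2, span));  /* 2 */
	return 0;
}
#endif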
1910 
1911 /* Will lock the rq it finds */
1912 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1913 {
1914 	struct rq *lowest_rq = NULL;
1915 	int tries;
1916 	int cpu;
1917 
1918 	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1919 		cpu = find_lowest_rq(task);
1920 
1921 		if ((cpu == -1) || (cpu == rq->cpu))
1922 			break;
1923 
1924 		lowest_rq = cpu_rq(cpu);
1925 
1926 		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1927 			/*
1928 			 * Target rq has tasks of equal or higher priority,
1929 			 * retrying does not release any lock and is unlikely
1930 			 * to yield a different result.
1931 			 */
1932 			lowest_rq = NULL;
1933 			break;
1934 		}
1935 
1936 		/* if the prio of this runqueue changed, try again */
1937 		if (double_lock_balance(rq, lowest_rq)) {
1938 			/*
1939 			 * We had to unlock the run queue. In the
1940 			 * meantime, the task could have migrated
1941 			 * already or had its affinity changed.
1942 			 * Also make sure that it wasn't scheduled on its rq.
1943 			 * It is possible the task was scheduled, set
1944 			 * "migrate_disabled" and then got preempted, so we must
1945 			 * check the task migration disable flag here too.
1946 			 */
1947 			if (unlikely(task_rq(task) != rq ||
1948 				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
1949 				     task_running(rq, task) ||
1950 				     !rt_task(task) ||
1951 				     is_migration_disabled(task) ||
1952 				     !task_on_rq_queued(task))) {
1953 
1954 				double_unlock_balance(rq, lowest_rq);
1955 				lowest_rq = NULL;
1956 				break;
1957 			}
1958 		}
1959 
1960 		/* If this rq is still suitable use it. */
1961 		if (lowest_rq->rt.highest_prio.curr > task->prio)
1962 			break;
1963 
1964 		/* try again */
1965 		double_unlock_balance(rq, lowest_rq);
1966 		lowest_rq = NULL;
1967 	}
1968 
1969 	return lowest_rq;
1970 }
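
/*
 * double_lock_balance() may have to drop rq->lock in order to take both
 * rq locks in a stable global order, which is why the unlikely() block
 * above revalidates everything once both locks are finally held. A
 * minimal pthread sketch of that ordering discipline, assuming locks
 * are ordered by address:
 */
#if 0	/* illustrative sketch, not part of the build */
#include <pthread.h>
#include <stdio.h>

/*
 * Acquire @other while already holding @held. Returns 1 when @held had
 * to be dropped; the caller must then recheck any state it read under
 * it, exactly as push_rt_task() does around find_lock_lowest_rq().
 */
static int double_lock(pthread_mutex_t *held, pthread_mutex_t *other)
{
	if (pthread_mutex_trylock(other) == 0)
		return 0;			/* fast path: nothing dropped */
	if (other < held) {			/* must take @other first */
		pthread_mutex_unlock(held);
		pthread_mutex_lock(other);
		pthread_mutex_lock(held);
		return 1;			/* state may have changed */
	}
	pthread_mutex_lock(other);		/* already in order: just wait */
	return 0;
}

int main(void)
{
	pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_lock(&a);
	printf("dropped=%d\n", double_lock(&a, &b));
	pthread_mutex_unlock(&b);
	pthread_mutex_unlock(&a);
	return 0;
}
#endif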
1971 
1972 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1973 {
1974 	struct task_struct *p;
1975 
1976 	if (!has_pushable_tasks(rq))
1977 		return NULL;
1978 
1979 	p = plist_first_entry(&rq->rt.pushable_tasks,
1980 			      struct task_struct, pushable_tasks);
1981 
1982 	BUG_ON(rq->cpu != task_cpu(p));
1983 	BUG_ON(task_current(rq, p));
1984 	BUG_ON(p->nr_cpus_allowed <= 1);
1985 
1986 	BUG_ON(!task_on_rq_queued(p));
1987 	BUG_ON(!rt_task(p));
1988 
1989 	return p;
1990 }
1991 
1992 /*
1993  * If the current CPU has more than one RT task, see if the non-running
1994  * task can migrate over to a CPU that is running a task of lesser
1995  * priority.
1996  */
1997 static int push_rt_task(struct rq *rq, bool pull)
1998 {
1999 	struct task_struct *next_task;
2000 	struct rq *lowest_rq;
2001 	int ret = 0;
2002 
2003 	if (!rq->rt.overloaded)
2004 		return 0;
2005 
2006 	next_task = pick_next_pushable_task(rq);
2007 	if (!next_task)
2008 		return 0;
2009 
2010 retry:
2011 	/*
2012 	 * It's possible that next_task slipped in with a higher
2013 	 * priority than current. If that's the case, just
2014 	 * reschedule current.
2015 	 */
2016 	if (unlikely(next_task->prio < rq->curr->prio)) {
2017 		resched_curr(rq);
2018 		return 0;
2019 	}
2020 
2021 	if (is_migration_disabled(next_task)) {
2022 		struct task_struct *push_task = NULL;
2023 		int cpu;
2024 
2025 		if (!pull || rq->push_busy)
2026 			return 0;
2027 
2028 		/*
2029 		 * Invoking find_lowest_rq() on anything but an RT task doesn't
2030 		 * make sense. Per the above priority check, curr has to
2031 		 * be of higher priority than next_task, so no need to
2032 		 * reschedule when bailing out.
2033 		 *
2034 		 * Note that the stoppers are masqueraded as SCHED_FIFO
2035 		 * (cf. sched_set_stop_task()), so we can't rely on rt_task().
2036 		 */
2037 		if (rq->curr->sched_class != &rt_sched_class)
2038 			return 0;
2039 
2040 		cpu = find_lowest_rq(rq->curr);
2041 		if (cpu == -1 || cpu == rq->cpu)
2042 			return 0;
2043 
2044 		/*
2045 		 * We found a CPU with a lower-priority task than @next_task,
2046 		 * so that task should keep running. However, we cannot migrate
2047 		 * @next_task there while it is migration-disabled; instead,
2048 		 * attempt to push away the task currently running on this CPU.
2049 		 */
2050 		push_task = get_push_task(rq);
2051 		if (push_task) {
2052 			preempt_disable();
2053 			raw_spin_rq_unlock(rq);
2054 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2055 					    push_task, &rq->push_work);
2056 			preempt_enable();
2057 			raw_spin_rq_lock(rq);
2058 		}
2059 
2060 		return 0;
2061 	}
2062 
2063 	if (WARN_ON(next_task == rq->curr))
2064 		return 0;
2065 
2066 	/* We might release rq lock */
2067 	get_task_struct(next_task);
2068 
2069 	/* find_lock_lowest_rq locks the rq if found */
2070 	lowest_rq = find_lock_lowest_rq(next_task, rq);
2071 	if (!lowest_rq) {
2072 		struct task_struct *task;
2073 		/*
2074 		 * find_lock_lowest_rq releases rq->lock
2075 		 * so it is possible that next_task has migrated.
2076 		 *
2077 		 * We need to make sure that the task is still on the same
2078 		 * run-queue and is also still the next task eligible for
2079 		 * pushing.
2080 		 */
2081 		task = pick_next_pushable_task(rq);
2082 		if (task == next_task) {
2083 			/*
2084 			 * The task hasn't migrated, and is still the next
2085 			 * eligible task, but we failed to find a run-queue
2086 			 * to push it to.  Do not retry in this case, since
2087 			 * other CPUs will pull from us when ready.
2088 			 */
2089 			goto out;
2090 		}
2091 
2092 		if (!task)
2093 			/* No more tasks, just exit */
2094 			goto out;
2095 
2096 		/*
2097 		 * Something has shifted, try again.
2098 		 */
2099 		put_task_struct(next_task);
2100 		next_task = task;
2101 		goto retry;
2102 	}
2103 
2104 	deactivate_task(rq, next_task, 0);
2105 	set_task_cpu(next_task, lowest_rq->cpu);
2106 	activate_task(lowest_rq, next_task, 0);
2107 	resched_curr(lowest_rq);
2108 	ret = 1;
2109 
2110 	double_unlock_balance(rq, lowest_rq);
2111 out:
2112 	put_task_struct(next_task);
2113 
2114 	return ret;
2115 }
2116 
2117 static void push_rt_tasks(struct rq *rq)
2118 {
2119 	/* push_rt_task will return true if it moved an RT */
2120 	while (push_rt_task(rq, false))
2121 		;
2122 }
2123 
2124 #ifdef HAVE_RT_PUSH_IPI
2125 
2126 /*
2127  * When a high priority task schedules out from a CPU and a lower priority
2128  * task is scheduled in, a check is made to see if there are any RT tasks
2129  * on other CPUs that are waiting to run because a higher priority RT task
2130  * is currently running on its CPU. In this case, the CPU with multiple RT
2131  * tasks queued on it (overloaded) needs to be notified that a CPU has opened
2132  * up that may be able to run one of its non-running queued RT tasks.
2133  *
2134  * All CPUs with overloaded RT tasks need to be notified, as there is
2135  * currently no way to know which of these CPUs has the highest priority
2136  * task waiting to run. Instead of trying to take a spinlock on each of
2137  * these CPUs, which has been shown to cause large latency on machines
2138  * with many CPUs, an IPI is sent to the CPUs to have them push off their
2139  * overloaded RT tasks waiting to run.
2140  *
2141  * Just sending an IPI to each of the CPUs is also an issue, as on
2142  * machines with a large CPU count this can cause an IPI storm on a CPU,
2143  * especially if it is the only CPU with multiple RT tasks queued and a
2144  * large number of CPUs schedule a lower priority task at the same time.
2145  *
2146  * Each root domain has its own irq work function that can iterate over
2147  * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
2148  * tasks must be checked whether one or many CPUs are lowering their
2149  * priority, there is a single irq work iterator that will try to push
2150  * off the RT tasks that are waiting to run.
2151  *
2152  * When a CPU schedules a lower priority task, it will kick off the
2153  * irq work iterator that will jump to each CPU with overloaded RT tasks.
2154  * As it only takes the first CPU that schedules a lower priority task
2155  * to start the process, the rto_start variable is incremented and if
2156  * the atomic result is one, then that CPU will try to take the rto_lock.
2157  * This prevents high contention on the lock as the process handles all
2158  * CPUs scheduling lower priority tasks.
2159  *
2160  * All CPUs that are scheduling a lower priority task will increment the
2161  * rto_loop_next variable. This makes sure that the irq work iterator
2162  * checks all RT overloaded CPUs whenever a CPU schedules a new lower
2163  * priority task, even if the iterator is in the middle of a scan.
2164  * Incrementing rto_loop_next will cause the iterator to perform another
2165  * scan.
2166  */
2167 static int rto_next_cpu(struct root_domain *rd)
2168 {
2169 	int next;
2170 	int cpu;
2171 
2172 	/*
2173 	 * When starting the IPI RT pushing, the rto_cpu is set to -1,
2174 	 * so rto_next_cpu() will simply return the first CPU found in
2175 	 * the rto_mask.
2176 	 *
2177 	 * If rto_next_cpu() is called with rto_cpu set to a valid CPU, it
2178 	 * will return the next CPU found in the rto_mask.
2179 	 *
2180 	 * If there are no more CPUs left in the rto_mask, then a check is made
2181 	 * against rto_loop and rto_loop_next. rto_loop is only updated with
2182 	 * the rto_lock held, but any CPU may increment the rto_loop_next
2183 	 * without any locking.
2184 	 */
2185 	for (;;) {
2186 
2187 		/* When rto_cpu is -1 this acts like cpumask_first() */
2188 		cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
2189 
2190 		/* This can be any CPU in rd->rto_mask, even a halted one; the hook below may update it */
2191 		trace_android_rvh_rto_next_cpu(rd->rto_cpu, rd->rto_mask, &cpu);
2192 
2193 		rd->rto_cpu = cpu;
2194 
2195 		if (cpu < nr_cpu_ids)
2196 			return cpu;
2197 
2198 		rd->rto_cpu = -1;
2199 
2200 		/*
2201 		 * ACQUIRE ensures we see the @rto_mask changes
2202 		 * made prior to the @next value observed.
2203 		 *
2204 		 * Matches WMB in rt_set_overload().
2205 		 */
2206 		next = atomic_read_acquire(&rd->rto_loop_next);
2207 
2208 		if (rd->rto_loop == next)
2209 			break;
2210 
2211 		rd->rto_loop = next;
2212 	}
2213 
2214 	return -1;
2215 }
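
/*
 * The generation check above is what lets the IPI chain stop safely:
 * any CPU may bump rto_loop_next without holding a lock, and the
 * iterator, which only updates rto_loop under rto_lock, keeps
 * rescanning until the two match. A minimal C11-atomics sketch of the
 * same protocol, with hypothetical names:
 */
#if 0	/* illustrative sketch, not part of the build */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int loop_next;	/* like rto_loop_next: bumped lock-free */
static int loop;		/* like rto_loop: iterator's generation */

/* Called with the (elided) equivalent of rto_lock held. */
static int scan_done(void)
{
	/* Acquire pairs with the barrier ordering the rto_mask update. */
	int next = atomic_load_explicit(&loop_next, memory_order_acquire);

	if (loop == next)
		return 1;	/* nothing new: the IPI chain may stop */
	loop = next;		/* new work arrived: restart the scan */
	return 0;
}

int main(void)
{
	atomic_fetch_add(&loop_next, 1);	/* a CPU lowered its prio */
	printf("done=%d\n", scan_done());	/* 0: rescan needed */
	printf("done=%d\n", scan_done());	/* 1: caught up, stop */
	return 0;
}
#endif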
2216 
2217 static inline bool rto_start_trylock(atomic_t *v)
2218 {
2219 	return !atomic_cmpxchg_acquire(v, 0, 1);
2220 }
2221 
2222 static inline void rto_start_unlock(atomic_t *v)
2223 {
2224 	atomic_set_release(v, 0);
2225 }
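
/*
 * rto_start_trylock() is a one-word lock: only the caller that makes
 * the 0 -> 1 transition wins and starts the IPI loop, and the release
 * store in rto_start_unlock() publishes everything done while it was
 * held. A C11 sketch of the same pair:
 */
#if 0	/* illustrative sketch, not part of the build */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int loop_start;	/* like rd->rto_loop_start */

static int start_trylock(void)
{
	int expected = 0;

	/* Succeeds for exactly one concurrent caller. */
	return atomic_compare_exchange_strong_explicit(&loop_start,
			&expected, 1,
			memory_order_acquire, memory_order_relaxed);
}

static void start_unlock(void)
{
	atomic_store_explicit(&loop_start, 0, memory_order_release);
}

int main(void)
{
	printf("first=%d\n", start_trylock());	/* 1: lock taken */
	printf("second=%d\n", start_trylock());	/* 0: already held */
	start_unlock();
	return 0;
}
#endif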
2226 
2227 static void tell_cpu_to_push(struct rq *rq)
2228 {
2229 	int cpu = -1;
2230 
2231 	/* Keep the loop going if the IPI is currently active */
2232 	atomic_inc(&rq->rd->rto_loop_next);
2233 
2234 	/* Only one CPU can initiate a loop at a time */
2235 	if (!rto_start_trylock(&rq->rd->rto_loop_start))
2236 		return;
2237 
2238 	raw_spin_lock(&rq->rd->rto_lock);
2239 
2240 	/*
2241 	 * The rto_cpu is updated under the lock, if it has a valid CPU
2242 	 * then the IPI is still running and will continue due to the
2243 	 * update to loop_next, and nothing needs to be done here.
2244 	 * Otherwise it is finishing up and an IPI needs to be sent.
2245 	 */
2246 	if (rq->rd->rto_cpu < 0)
2247 		cpu = rto_next_cpu(rq->rd);
2248 
2249 	raw_spin_unlock(&rq->rd->rto_lock);
2250 
2251 	rto_start_unlock(&rq->rd->rto_loop_start);
2252 
2253 	if (cpu >= 0) {
2254 		/* Make sure the rd does not get freed while pushing */
2255 		sched_get_rd(rq->rd);
2256 		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2257 	}
2258 }
2259 
2260 /* Called from hardirq context */
2261 void rto_push_irq_work_func(struct irq_work *work)
2262 {
2263 	struct root_domain *rd =
2264 		container_of(work, struct root_domain, rto_push_work);
2265 	struct rq *rq;
2266 	int cpu;
2267 
2268 	rq = this_rq();
2269 
2270 	/*
2271 	 * We do not need to grab the lock to check has_pushable_tasks();
2272 	 * when it gets updated, a check is made whether a push is possible.
2273 	 */
2274 	if (has_pushable_tasks(rq)) {
2275 		raw_spin_rq_lock(rq);
2276 		while (push_rt_task(rq, true))
2277 			;
2278 		raw_spin_rq_unlock(rq);
2279 	}
2280 
2281 	raw_spin_lock(&rd->rto_lock);
2282 
2283 	/* Pass the IPI to the next rt overloaded queue */
2284 	cpu = rto_next_cpu(rd);
2285 
2286 	raw_spin_unlock(&rd->rto_lock);
2287 
2288 	if (cpu < 0) {
2289 		sched_put_rd(rd);
2290 		return;
2291 	}
2292 
2293 	/* Try the next RT overloaded CPU */
2294 	irq_work_queue_on(&rd->rto_push_work, cpu);
2295 }
2296 #endif /* HAVE_RT_PUSH_IPI */
2297 
2298 static void pull_rt_task(struct rq *this_rq)
2299 {
2300 	int this_cpu = this_rq->cpu, cpu;
2301 	bool resched = false;
2302 	struct task_struct *p, *push_task;
2303 	struct rq *src_rq;
2304 	int rt_overload_count = rt_overloaded(this_rq);
2305 
2306 	if (likely(!rt_overload_count))
2307 		return;
2308 
2309 	/*
2310 	 * Match the barrier from rt_set_overload(); this guarantees that if we
2311 	 * see overloaded we must also see the rto_mask bit.
2312 	 */
2313 	smp_rmb();
2314 
2315 	/* If we are the only overloaded CPU do nothing */
2316 	if (rt_overload_count == 1 &&
2317 	    cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2318 		return;
2319 
2320 #ifdef HAVE_RT_PUSH_IPI
2321 	if (sched_feat(RT_PUSH_IPI)) {
2322 		tell_cpu_to_push(this_rq);
2323 		return;
2324 	}
2325 #endif
2326 
2327 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
2328 		if (this_cpu == cpu)
2329 			continue;
2330 
2331 		src_rq = cpu_rq(cpu);
2332 
2333 		/*
2334 		 * Don't bother taking the src_rq->lock if the next highest
2335 		 * task is known to be lower-priority than our current task.
2336 		 * This may look racy, but if this value is about to go
2337 		 * logically higher, the src_rq will push this task away.
2338 		 * And if it's going logically lower, we do not care.
2339 		 */
2340 		if (src_rq->rt.highest_prio.next >=
2341 		    this_rq->rt.highest_prio.curr)
2342 			continue;
2343 
2344 		/*
2345 		 * We can potentially drop this_rq's lock in
2346 		 * double_lock_balance, and another CPU could
2347 		 * alter this_rq
2348 		 */
2349 		push_task = NULL;
2350 		double_lock_balance(this_rq, src_rq);
2351 
2352 		/*
2353 		 * We can only pull a task that is pushable
2354 		 * on its rq, and no others.
2355 		 */
2356 		p = pick_highest_pushable_task(src_rq, this_cpu);
2357 
2358 		/*
2359 		 * Do we have an RT task that preempts
2360 		 * the to-be-scheduled task?
2361 		 */
2362 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2363 			WARN_ON(p == src_rq->curr);
2364 			WARN_ON(!task_on_rq_queued(p));
2365 
2366 			/*
2367 			 * There's a chance that p has a higher priority
2368 			 * than what's currently running on its CPU.
2369 			 * This just means that p is waking up and hasn't
2370 			 * had a chance to schedule yet. We only pull
2371 			 * p if it has a lower priority than the
2372 			 * current task on its run queue.
2373 			 */
2374 			if (p->prio < src_rq->curr->prio)
2375 				goto skip;
2376 
2377 			if (is_migration_disabled(p)) {
2378 				push_task = get_push_task(src_rq);
2379 			} else {
2380 				deactivate_task(src_rq, p, 0);
2381 				set_task_cpu(p, this_cpu);
2382 				activate_task(this_rq, p, 0);
2383 				resched = true;
2384 			}
2385 			/*
2386 			 * We continue with the search, just in
2387 			 * case there's an even higher prio task
2388 			 * in another runqueue. (low likelihood
2389 			 * but possible)
2390 			 */
2391 		}
2392 skip:
2393 		double_unlock_balance(this_rq, src_rq);
2394 
2395 		if (push_task) {
2396 			preempt_disable();
2397 			raw_spin_rq_unlock(this_rq);
2398 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2399 					    push_task, &src_rq->push_work);
2400 			preempt_enable();
2401 			raw_spin_rq_lock(this_rq);
2402 		}
2403 	}
2404 
2405 	if (resched)
2406 		resched_curr(this_rq);
2407 }
2408 
2409 /*
2410  * If we are not running and we are not going to reschedule soon, we should
2411  * try to push tasks away now
2412  */
2413 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2414 {
2415 	bool need_to_push = !task_running(rq, p) &&
2416 			    !test_tsk_need_resched(rq->curr) &&
2417 			    p->nr_cpus_allowed > 1 &&
2418 			    (dl_task(rq->curr) || rt_task(rq->curr)) &&
2419 			    (rq->curr->nr_cpus_allowed < 2 ||
2420 			     rq->curr->prio <= p->prio);
2421 
2422 	if (need_to_push)
2423 		push_rt_tasks(rq);
2424 }
2425 
2426 /* Assumes rq->lock is held */
2427 static void rq_online_rt(struct rq *rq)
2428 {
2429 	if (rq->rt.overloaded)
2430 		rt_set_overload(rq);
2431 
2432 	__enable_runtime(rq);
2433 
2434 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2435 }
2436 
2437 /* Assumes rq->lock is held */
2438 static void rq_offline_rt(struct rq *rq)
2439 {
2440 	if (rq->rt.overloaded)
2441 		rt_clear_overload(rq);
2442 
2443 	__disable_runtime(rq);
2444 
2445 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2446 }
2447 
2448 /*
2449  * When switching away from the RT class, we bring ourselves to a
2450  * position where we might want to pull RT tasks from other runqueues.
2451  */
2452 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2453 {
2454 	/*
2455 	 * If there are other RT tasks then we will reschedule
2456 	 * and the scheduling of the other RT tasks will handle
2457 	 * the balancing. But if we are the last RT task
2458 	 * we may need to handle the pulling of RT tasks
2459 	 * now.
2460 	 */
2461 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2462 		return;
2463 
2464 	rt_queue_pull_task(rq);
2465 }
2466 
2467 void __init init_sched_rt_class(void)
2468 {
2469 	unsigned int i;
2470 
2471 	for_each_possible_cpu(i) {
2472 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2473 					GFP_KERNEL, cpu_to_node(i));
2474 	}
2475 }
2476 #endif /* CONFIG_SMP */
2477 
2478 /*
2479  * When switching a task to RT, we may overload the runqueue
2480  * with RT tasks. In this case we try to push them off to
2481  * other runqueues.
2482  */
2483 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2484 {
2485 	/*
2486 	 * If we are running, update the avg_rt tracking, as the running time
2487 	 * will from now on be accounted to it.
2488 	 */
2489 	if (task_current(rq, p)) {
2490 		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2491 		return;
2492 	}
2493 
2494 	/*
2495 	 * If we are not running we may need to preempt the current
2496 	 * running task. If that current running task is also an RT task
2497 	 * then see if we can move to another run queue.
2498 	 */
2499 	if (task_on_rq_queued(p)) {
2500 #ifdef CONFIG_SMP
2501 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2502 			rt_queue_push_tasks(rq);
2503 #endif /* CONFIG_SMP */
2504 		if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2505 			resched_curr(rq);
2506 	}
2507 }
2508 
2509 /*
2510  * Priority of the task has changed. This may cause
2511  * us to initiate a push or pull.
2512  */
2513 static void
2514 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2515 {
2516 	if (!task_on_rq_queued(p))
2517 		return;
2518 
2519 	if (task_current(rq, p)) {
2520 #ifdef CONFIG_SMP
2521 		/*
2522 		 * If our priority decreases while running, we
2523 		 * may need to pull tasks to this runqueue.
2524 		 */
2525 		if (oldprio < p->prio)
2526 			rt_queue_pull_task(rq);
2527 
2528 		/*
2529 		 * If there's a higher priority task waiting to run
2530 		 * then reschedule.
2531 		 */
2532 		if (p->prio > rq->rt.highest_prio.curr)
2533 			resched_curr(rq);
2534 #else
2535 		/* For UP simply resched on drop of prio */
2536 		if (oldprio < p->prio)
2537 			resched_curr(rq);
2538 #endif /* CONFIG_SMP */
2539 	} else {
2540 		/*
2541 		 * This task is not running, but if its priority is
2542 		 * higher than that of the currently running task,
2543 		 * then reschedule.
2544 		 */
2545 		if (p->prio < rq->curr->prio)
2546 			resched_curr(rq);
2547 	}
2548 }
2549 
2550 #ifdef CONFIG_POSIX_TIMERS
2551 static void watchdog(struct rq *rq, struct task_struct *p)
2552 {
2553 	unsigned long soft, hard;
2554 
2555 	/* max may change after cur was read, this will be fixed next tick */
2556 	soft = task_rlimit(p, RLIMIT_RTTIME);
2557 	hard = task_rlimit_max(p, RLIMIT_RTTIME);
2558 
2559 	if (soft != RLIM_INFINITY) {
2560 		unsigned long next;
2561 
2562 		if (p->rt.watchdog_stamp != jiffies) {
2563 			p->rt.timeout++;
2564 			p->rt.watchdog_stamp = jiffies;
2565 		}
2566 
2567 		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2568 		if (p->rt.timeout > next) {
2569 			posix_cputimers_rt_watchdog(&p->posix_cputimers,
2570 						    p->se.sum_exec_runtime);
2571 		}
2572 	}
2573 }
2574 #else
2575 static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2576 #endif
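
/*
 * The watchdog accounts in scheduler ticks: p->rt.timeout is bumped at
 * most once per jiffy, and the RLIMIT_RTTIME value (microseconds) is
 * converted to a tick count with DIV_ROUND_UP(min(soft, hard),
 * USEC_PER_SEC/HZ). A worked userspace sketch, assuming HZ=250:
 */
#if 0	/* illustrative sketch, not part of the build */
#include <stdio.h>

#define HZ		250
#define USEC_PER_SEC	1000000UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long soft = 2000000;	/* soft RLIMIT_RTTIME: 2s in us */
	unsigned long hard = 3000000;	/* hard RLIMIT_RTTIME: 3s in us */
	unsigned long min_us = soft < hard ? soft : hard;

	/* One tick lasts USEC_PER_SEC/HZ us: 4000us at HZ=250. */
	unsigned long limit = DIV_ROUND_UP(min_us, USEC_PER_SEC / HZ);

	/* The watchdog fires once p->rt.timeout exceeds this. */
	printf("limit = %lu ticks\n", limit);	/* 500 */
	return 0;
}
#endif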
2577 
2578 /*
2579  * scheduler tick hitting a task of our scheduling class.
2580  *
2581  * NOTE: This function can be called remotely by the tick offload that
2582  * goes along full dynticks. Therefore no local assumption can be made
2583  * and everything must be accessed through the @rq and @curr passed in
2584  * parameters.
2585  */
2586 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2587 {
2588 	struct sched_rt_entity *rt_se = &p->rt;
2589 
2590 	update_curr_rt(rq);
2591 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2592 	trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1);
2593 
2594 	watchdog(rq, p);
2595 
2596 	/*
2597 	 * RR tasks need a special form of timeslice management.
2598 	 * FIFO tasks have no timeslices.
2599 	 */
2600 	if (p->policy != SCHED_RR)
2601 		return;
2602 
2603 	if (--p->rt.time_slice)
2604 		return;
2605 
2606 	p->rt.time_slice = sched_rr_timeslice;
2607 
2608 	/*
2609 	 * Requeue to the end of queue if we (and all of our ancestors) are not
2610 	 * the only element on the queue
2611 	 */
2612 	for_each_sched_rt_entity(rt_se) {
2613 		if (rt_se->run_list.prev != rt_se->run_list.next) {
2614 			requeue_task_rt(rq, p, 0);
2615 			resched_curr(rq);
2616 			return;
2617 		}
2618 	}
2619 }
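
/*
 * The round-robin bookkeeping above is: each tick decrements
 * p->rt.time_slice; only when it hits zero is the slice refilled, and
 * the task is moved to the tail of its queue only if it has peers at
 * the same priority. A minimal sketch of that per-tick decision:
 */
#if 0	/* illustrative sketch, not part of the build */
#include <stdio.h>

#define RR_SLICE 25	/* stand-in for sched_rr_timeslice, in jiffies */

struct rr_task {
	int time_slice;
	int has_peers;	/* another entity queued at the same priority? */
};

/* Returns 1 when the task must be requeued and curr rescheduled. */
static int rr_tick(struct rr_task *p)
{
	if (--p->time_slice)
		return 0;		/* slice not yet used up */
	p->time_slice = RR_SLICE;	/* refill for the next round */
	return p->has_peers;		/* rotate only among peers */
}

int main(void)
{
	struct rr_task p = { .time_slice = 2, .has_peers = 1 };

	printf("%d\n", rr_tick(&p));	/* 0: slice not used up */
	printf("%d\n", rr_tick(&p));	/* 1: refilled, requeue */
	return 0;
}
#endif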
2620 
2621 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2622 {
2623 	/*
2624 	 * Time slice is 0 for SCHED_FIFO tasks
2625 	 */
2626 	if (task->policy == SCHED_RR)
2627 		return sched_rr_timeslice;
2628 	else
2629 		return 0;
2630 }
2631 
2632 DEFINE_SCHED_CLASS(rt) = {
2633 
2634 	.enqueue_task		= enqueue_task_rt,
2635 	.dequeue_task		= dequeue_task_rt,
2636 	.yield_task		= yield_task_rt,
2637 
2638 	.check_preempt_curr	= check_preempt_curr_rt,
2639 
2640 	.pick_next_task		= pick_next_task_rt,
2641 	.put_prev_task		= put_prev_task_rt,
2642 	.set_next_task          = set_next_task_rt,
2643 
2644 #ifdef CONFIG_SMP
2645 	.balance		= balance_rt,
2646 	.pick_task		= pick_task_rt,
2647 	.select_task_rq		= select_task_rq_rt,
2648 	.set_cpus_allowed       = set_cpus_allowed_common,
2649 	.rq_online              = rq_online_rt,
2650 	.rq_offline             = rq_offline_rt,
2651 	.task_woken		= task_woken_rt,
2652 	.switched_from		= switched_from_rt,
2653 	.find_lock_rq		= find_lock_lowest_rq,
2654 #endif
2655 
2656 	.task_tick		= task_tick_rt,
2657 
2658 	.get_rr_interval	= get_rr_interval_rt,
2659 
2660 	.prio_changed		= prio_changed_rt,
2661 	.switched_to		= switched_to_rt,
2662 
2663 	.update_curr		= update_curr_rt,
2664 
2665 #ifdef CONFIG_UCLAMP_TASK
2666 	.uclamp_enabled		= 1,
2667 #endif
2668 };
2669 
2670 #ifdef CONFIG_RT_GROUP_SCHED
2671 /*
2672  * Ensure that the real time constraints are schedulable.
2673  */
2674 static DEFINE_MUTEX(rt_constraints_mutex);
2675 
2676 static inline int tg_has_rt_tasks(struct task_group *tg)
2677 {
2678 	struct task_struct *task;
2679 	struct css_task_iter it;
2680 	int ret = 0;
2681 
2682 	/*
2683 	 * Autogroups do not have RT tasks; see autogroup_create().
2684 	 */
2685 	if (task_group_is_autogroup(tg))
2686 		return 0;
2687 
2688 	css_task_iter_start(&tg->css, 0, &it);
2689 	while (!ret && (task = css_task_iter_next(&it)))
2690 		ret |= rt_task(task);
2691 	css_task_iter_end(&it);
2692 
2693 	return ret;
2694 }
2695 
2696 struct rt_schedulable_data {
2697 	struct task_group *tg;
2698 	u64 rt_period;
2699 	u64 rt_runtime;
2700 };
2701 
2702 static int tg_rt_schedulable(struct task_group *tg, void *data)
2703 {
2704 	struct rt_schedulable_data *d = data;
2705 	struct task_group *child;
2706 	unsigned long total, sum = 0;
2707 	u64 period, runtime;
2708 
2709 	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2710 	runtime = tg->rt_bandwidth.rt_runtime;
2711 
2712 	if (tg == d->tg) {
2713 		period = d->rt_period;
2714 		runtime = d->rt_runtime;
2715 	}
2716 
2717 	/*
2718 	 * Cannot have more runtime than the period.
2719 	 */
2720 	if (runtime > period && runtime != RUNTIME_INF)
2721 		return -EINVAL;
2722 
2723 	/*
2724 	 * Ensure we don't starve existing RT tasks if runtime turns zero.
2725 	 */
2726 	if (rt_bandwidth_enabled() && !runtime &&
2727 	    tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
2728 		return -EBUSY;
2729 
2730 	total = to_ratio(period, runtime);
2731 
2732 	/*
2733 	 * Nobody can have more than the global setting allows.
2734 	 */
2735 	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
2736 		return -EINVAL;
2737 
2738 	/*
2739 	 * The sum of our children's runtime should not exceed our own.
2740 	 */
2741 	list_for_each_entry_rcu(child, &tg->children, siblings) {
2742 		period = ktime_to_ns(child->rt_bandwidth.rt_period);
2743 		runtime = child->rt_bandwidth.rt_runtime;
2744 
2745 		if (child == d->tg) {
2746 			period = d->rt_period;
2747 			runtime = d->rt_runtime;
2748 		}
2749 
2750 		sum += to_ratio(period, runtime);
2751 	}
2752 
2753 	if (sum > total)
2754 		return -EINVAL;
2755 
2756 	return 0;
2757 }
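
/*
 * to_ratio() (defined elsewhere in the scheduler) expresses
 * runtime/period as a fixed-point fraction scaled by 2^BW_SHIFT, so the
 * checks above reduce to integer comparisons. A worked sketch of the
 * parent/children accounting, assuming BW_SHIFT = 20 (cf. the MAX_BW
 * note near the top of this file) and ignoring the RUNTIME_INF case:
 */
#if 0	/* illustrative sketch, not part of the build */
#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 20

static uint64_t to_ratio(uint64_t period_ns, uint64_t runtime_ns)
{
	return (runtime_ns << BW_SHIFT) / period_ns;
}

int main(void)
{
	/* Parent group: 0.95s of runtime per 1s period. */
	uint64_t total = to_ratio(1000000000ULL, 950000000ULL);
	/* Two children at 0.4s per 1s each. */
	uint64_t sum = 2 * to_ratio(1000000000ULL, 400000000ULL);

	/* 0.8 of a CPU <= 0.95 of a CPU: this configuration fits. */
	printf("total=%llu sum=%llu ok=%d\n",
	       (unsigned long long)total, (unsigned long long)sum,
	       sum <= total);
	return 0;
}
#endif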
2758 
2759 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2760 {
2761 	int ret;
2762 
2763 	struct rt_schedulable_data data = {
2764 		.tg = tg,
2765 		.rt_period = period,
2766 		.rt_runtime = runtime,
2767 	};
2768 
2769 	rcu_read_lock();
2770 	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
2771 	rcu_read_unlock();
2772 
2773 	return ret;
2774 }
2775 
2776 static int tg_set_rt_bandwidth(struct task_group *tg,
2777 		u64 rt_period, u64 rt_runtime)
2778 {
2779 	int i, err = 0;
2780 
2781 	/*
2782 	 * Disallowing the root group RT runtime is BAD; it would prevent the
2783 	 * kernel from creating (and/or operating) RT threads.
2784 	 */
2785 	if (tg == &root_task_group && rt_runtime == 0)
2786 		return -EINVAL;
2787 
2788 	/* No period doesn't make any sense. */
2789 	if (rt_period == 0)
2790 		return -EINVAL;
2791 
2792 	/*
2793 	 * Bound quota to defend quota against overflow during bandwidth shift.
2794 	 */
2795 	if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
2796 		return -EINVAL;
2797 
2798 	mutex_lock(&rt_constraints_mutex);
2799 	err = __rt_schedulable(tg, rt_period, rt_runtime);
2800 	if (err)
2801 		goto unlock;
2802 
2803 	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2804 	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
2805 	tg->rt_bandwidth.rt_runtime = rt_runtime;
2806 
2807 	for_each_possible_cpu(i) {
2808 		struct rt_rq *rt_rq = tg->rt_rq[i];
2809 
2810 		raw_spin_lock(&rt_rq->rt_runtime_lock);
2811 		rt_rq->rt_runtime = rt_runtime;
2812 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
2813 	}
2814 	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2815 unlock:
2816 	mutex_unlock(&rt_constraints_mutex);
2817 
2818 	return err;
2819 }
2820 
2821 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
2822 {
2823 	u64 rt_runtime, rt_period;
2824 
2825 	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2826 	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
2827 	if (rt_runtime_us < 0)
2828 		rt_runtime = RUNTIME_INF;
2829 	else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
2830 		return -EINVAL;
2831 
2832 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2833 }
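
/*
 * The runtime is supplied in microseconds and stored in nanoseconds; a
 * negative value means "unlimited" and anything whose conversion would
 * not fit in a u64 is rejected. A userspace sketch of the same
 * conversion and guard:
 */
#if 0	/* illustrative sketch, not part of the build */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC	1000ULL
#define RUNTIME_INF	((uint64_t)~0ULL)

static int runtime_us_to_ns(long us, uint64_t *ns)
{
	if (us < 0) {
		*ns = RUNTIME_INF;	/* negative: no limit */
		return 0;
	}
	if ((uint64_t)us > UINT64_MAX / NSEC_PER_USEC)
		return -1;		/* would overflow u64 nanoseconds */
	*ns = (uint64_t)us * NSEC_PER_USEC;
	return 0;
}

int main(void)
{
	uint64_t ns;

	if (!runtime_us_to_ns(950000, &ns))
		printf("ns=%llu\n", (unsigned long long)ns);  /* 950000000 */
	return 0;
}
#endif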
2834 
2835 long sched_group_rt_runtime(struct task_group *tg)
2836 {
2837 	u64 rt_runtime_us;
2838 
2839 	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
2840 		return -1;
2841 
2842 	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
2843 	do_div(rt_runtime_us, NSEC_PER_USEC);
2844 	return rt_runtime_us;
2845 }
2846 
2847 int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
2848 {
2849 	u64 rt_runtime, rt_period;
2850 
2851 	if (rt_period_us > U64_MAX / NSEC_PER_USEC)
2852 		return -EINVAL;
2853 
2854 	rt_period = rt_period_us * NSEC_PER_USEC;
2855 	rt_runtime = tg->rt_bandwidth.rt_runtime;
2856 
2857 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2858 }
2859 
2860 long sched_group_rt_period(struct task_group *tg)
2861 {
2862 	u64 rt_period_us;
2863 
2864 	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
2865 	do_div(rt_period_us, NSEC_PER_USEC);
2866 	return rt_period_us;
2867 }
2868 
2869 static int sched_rt_global_constraints(void)
2870 {
2871 	int ret = 0;
2872 
2873 	mutex_lock(&rt_constraints_mutex);
2874 	ret = __rt_schedulable(NULL, 0, 0);
2875 	mutex_unlock(&rt_constraints_mutex);
2876 
2877 	return ret;
2878 }
2879 
2880 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
2881 {
2882 	/* Don't accept realtime tasks when there is no way for them to run */
2883 	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
2884 		return 0;
2885 
2886 	return 1;
2887 }
2888 
2889 #else /* !CONFIG_RT_GROUP_SCHED */
2890 static int sched_rt_global_constraints(void)
2891 {
2892 	unsigned long flags;
2893 	int i;
2894 
2895 	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
2896 	for_each_possible_cpu(i) {
2897 		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
2898 
2899 		raw_spin_lock(&rt_rq->rt_runtime_lock);
2900 		rt_rq->rt_runtime = global_rt_runtime();
2901 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
2902 	}
2903 	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2904 
2905 	return 0;
2906 }
2907 #endif /* CONFIG_RT_GROUP_SCHED */
2908 
2909 static int sched_rt_global_validate(void)
2910 {
2911 	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
2912 		((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
2913 		 ((u64)sysctl_sched_rt_runtime *
2914 			NSEC_PER_USEC > max_rt_runtime)))
2915 		return -EINVAL;
2916 
2917 	return 0;
2918 }
2919 
2920 static void sched_rt_do_global(void)
2921 {
2922 	unsigned long flags;
2923 
2924 	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
2925 	def_rt_bandwidth.rt_runtime = global_rt_runtime();
2926 	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
2927 	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2928 }
2929 
2930 int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
2931 		size_t *lenp, loff_t *ppos)
2932 {
2933 	int old_period, old_runtime;
2934 	static DEFINE_MUTEX(mutex);
2935 	int ret;
2936 
2937 	mutex_lock(&mutex);
2938 	old_period = sysctl_sched_rt_period;
2939 	old_runtime = sysctl_sched_rt_runtime;
2940 
2941 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2942 
2943 	if (!ret && write) {
2944 		ret = sched_rt_global_validate();
2945 		if (ret)
2946 			goto undo;
2947 
2948 		ret = sched_dl_global_validate();
2949 		if (ret)
2950 			goto undo;
2951 
2952 		ret = sched_rt_global_constraints();
2953 		if (ret)
2954 			goto undo;
2955 
2956 		sched_rt_do_global();
2957 		sched_dl_do_global();
2958 	}
2959 	if (0) {
2960 undo:
2961 		sysctl_sched_rt_period = old_period;
2962 		sysctl_sched_rt_runtime = old_runtime;
2963 	}
2964 	mutex_unlock(&mutex);
2965 
2966 	return ret;
2967 }
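
/*
 * The handler relies on proc_dointvec_minmax() writing the new values
 * before validation runs, so the snapshot-and-undo above (via the
 * "if (0) { undo: ... }" construct) is what makes a failed write
 * side-effect free. A minimal sketch of that save/validate/rollback
 * shape, with hypothetical names:
 */
#if 0	/* illustrative sketch, not part of the build */
#include <stdio.h>

static int period_us  = 1000000;
static int runtime_us = 950000;

static int validate(void)
{
	return runtime_us > period_us ? -1 : 0;	/* runtime <= period */
}

static int write_runtime(int new_runtime)
{
	int old = runtime_us;
	int ret;

	runtime_us = new_runtime;	/* tentative write, as the sysctl does */
	ret = validate();
	if (ret)
		runtime_us = old;	/* roll back on failure */
	return ret;
}

int main(void)
{
	printf("ret=%d runtime=%d\n", write_runtime(2000000), runtime_us);
	printf("ret=%d runtime=%d\n", write_runtime(900000), runtime_us);
	return 0;
}
#endif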
2968 
2969 int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
2970 		size_t *lenp, loff_t *ppos)
2971 {
2972 	int ret;
2973 	static DEFINE_MUTEX(mutex);
2974 
2975 	mutex_lock(&mutex);
2976 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2977 	/*
2978 	 * Make sure that internally we keep jiffies.
2979 	 * Also, writing zero resets the timeslice to the default:
2980 	 */
2981 	if (!ret && write) {
2982 		sched_rr_timeslice =
2983 			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
2984 			msecs_to_jiffies(sysctl_sched_rr_timeslice);
2985 
2986 		if (sysctl_sched_rr_timeslice <= 0)
2987 			sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
2988 	}
2989 	mutex_unlock(&mutex);
2990 
2991 	return ret;
2992 }
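
/*
 * The sysctl is exposed in milliseconds while the scheduler works in
 * jiffies, hence the msecs_to_jiffies() conversion, with any value <= 0
 * restoring the built-in RR_TIMESLICE default. A worked sketch,
 * assuming HZ=250 (so the 100ms RR_TIMESLICE default is 25 jiffies):
 */
#if 0	/* illustrative sketch, not part of the build */
#include <stdio.h>

#define HZ		250
#define RR_TIMESLICE	(100 * HZ / 1000)	/* 100ms in jiffies */

static long msecs_to_jiffies_sketch(long ms)
{
	return ms * HZ / 1000;	/* simplified: no rounding or clamping */
}

static long rr_timeslice_from_ms(long ms)
{
	return ms <= 0 ? RR_TIMESLICE : msecs_to_jiffies_sketch(ms);
}

int main(void)
{
	printf("%ld\n", rr_timeslice_from_ms(40));	/* 10 jiffies */
	printf("%ld\n", rr_timeslice_from_ms(0));	/* 25: the default */
	return 0;
}
#endif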
2993 
2994 #ifdef CONFIG_SCHED_DEBUG
2995 void print_rt_stats(struct seq_file *m, int cpu)
2996 {
2997 	rt_rq_iter_t iter;
2998 	struct rt_rq *rt_rq;
2999 
3000 	rcu_read_lock();
3001 	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
3002 		print_rt_rq(m, cpu, rt_rq);
3003 	rcu_read_unlock();
3004 }
3005 #endif /* CONFIG_SCHED_DEBUG */
3006