1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
4 * policies)
5 */
6 #include "sched.h"
7
8 #include "pelt.h"
9 #include "walt.h"
10
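/* RR timeslice: sched_rr_timeslice is kept in jiffies, the sysctl mirror below is exposed in milliseconds. */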
11 int sched_rr_timeslice = RR_TIMESLICE;
12 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
13 /* More than 4 hours if BW_SHIFT equals 20. */
14 static const u64 max_rt_runtime = MAX_BW;
15
16 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
17
18 struct rt_bandwidth def_rt_bandwidth;
19
20 #ifdef CONFIG_SCHED_RT_CAS
21 unsigned int sysctl_sched_enable_rt_cas = 1;
22 #endif
23
24 #ifdef CONFIG_SCHED_RT_ACTIVE_LB
25 unsigned int sysctl_sched_enable_rt_active_lb = 1;
26 #endif
27
28 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
29 {
30 struct rt_bandwidth *rt_b =
31 container_of(timer, struct rt_bandwidth, rt_period_timer);
32 int idle = 0;
33 int overrun;
34
35 raw_spin_lock(&rt_b->rt_runtime_lock);
36 for (;;) {
37 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
38 if (!overrun)
39 break;
40
41 raw_spin_unlock(&rt_b->rt_runtime_lock);
42 idle = do_sched_rt_period_timer(rt_b, overrun);
43 raw_spin_lock(&rt_b->rt_runtime_lock);
44 }
45 if (idle)
46 rt_b->rt_period_active = 0;
47 raw_spin_unlock(&rt_b->rt_runtime_lock);
48
49 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
50 }
51
52 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
53 {
54 rt_b->rt_period = ns_to_ktime(period);
55 rt_b->rt_runtime = runtime;
56
57 raw_spin_lock_init(&rt_b->rt_runtime_lock);
58
59 hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
60 HRTIMER_MODE_REL_HARD);
61 rt_b->rt_period_timer.function = sched_rt_period_timer;
62 }
63
64 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
65 {
66 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
67 return;
68
69 raw_spin_lock(&rt_b->rt_runtime_lock);
70 if (!rt_b->rt_period_active) {
71 rt_b->rt_period_active = 1;
72 /*
73 * SCHED_DEADLINE updates the bandwidth, as a run away
74 * RT task with a DL task could hog a CPU. But DL does
75 * not reset the period. If a deadline task was running
76 * without an RT task running, it can cause RT tasks to
77 * throttle when they start up. Kick the timer right away
78 * to update the period.
79 */
80 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
81 hrtimer_start_expires(&rt_b->rt_period_timer,
82 HRTIMER_MODE_ABS_PINNED_HARD);
83 }
84 raw_spin_unlock(&rt_b->rt_runtime_lock);
85 }
86
87 void init_rt_rq(struct rt_rq *rt_rq)
88 {
89 struct rt_prio_array *array;
90 int i;
91
92 array = &rt_rq->active;
93 for (i = 0; i < MAX_RT_PRIO; i++) {
94 INIT_LIST_HEAD(array->queue + i);
95 __clear_bit(i, array->bitmap);
96 }
97 /* delimiter for bitsearch: */
98 __set_bit(MAX_RT_PRIO, array->bitmap);
99
100 #if defined CONFIG_SMP
101 rt_rq->highest_prio.curr = MAX_RT_PRIO;
102 rt_rq->highest_prio.next = MAX_RT_PRIO;
103 rt_rq->rt_nr_migratory = 0;
104 rt_rq->overloaded = 0;
105 plist_head_init(&rt_rq->pushable_tasks);
106 #endif /* CONFIG_SMP */
107 /* We start in dequeued state, because no RT tasks are queued */
108 rt_rq->rt_queued = 0;
109
110 rt_rq->rt_time = 0;
111 rt_rq->rt_throttled = 0;
112 rt_rq->rt_runtime = 0;
113 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
114 }
115
116 #ifdef CONFIG_RT_GROUP_SCHED
117 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
118 {
119 hrtimer_cancel(&rt_b->rt_period_timer);
120 }
121
122 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
123
124 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
125 {
126 #ifdef CONFIG_SCHED_DEBUG
127 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
128 #endif
129 return container_of(rt_se, struct task_struct, rt);
130 }
131
132 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
133 {
134 return rt_rq->rq;
135 }
136
137 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
138 {
139 return rt_se->rt_rq;
140 }
141
142 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
143 {
144 struct rt_rq *rt_rq = rt_se->rt_rq;
145
146 return rt_rq->rq;
147 }
148
149 void free_rt_sched_group(struct task_group *tg)
150 {
151 int i;
152
153 if (tg->rt_se)
154 destroy_rt_bandwidth(&tg->rt_bandwidth);
155
156 for_each_possible_cpu(i) {
157 if (tg->rt_rq)
158 kfree(tg->rt_rq[i]);
159 if (tg->rt_se)
160 kfree(tg->rt_se[i]);
161 }
162
163 kfree(tg->rt_rq);
164 kfree(tg->rt_se);
165 }
166
167 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
168 struct sched_rt_entity *rt_se, int cpu,
169 struct sched_rt_entity *parent)
170 {
171 struct rq *rq = cpu_rq(cpu);
172
173 rt_rq->highest_prio.curr = MAX_RT_PRIO;
174 rt_rq->rt_nr_boosted = 0;
175 rt_rq->rq = rq;
176 rt_rq->tg = tg;
177
178 tg->rt_rq[cpu] = rt_rq;
179 tg->rt_se[cpu] = rt_se;
180
181 if (!rt_se)
182 return;
183
184 if (!parent)
185 rt_se->rt_rq = &rq->rt;
186 else
187 rt_se->rt_rq = parent->my_q;
188
189 rt_se->my_q = rt_rq;
190 rt_se->parent = parent;
191 INIT_LIST_HEAD(&rt_se->run_list);
192 }
193
194 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
195 {
196 struct rt_rq *rt_rq;
197 struct sched_rt_entity *rt_se;
198 int i;
199
200 tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
201 if (!tg->rt_rq)
202 goto err;
203 tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
204 if (!tg->rt_se)
205 goto err;
206
207 init_rt_bandwidth(&tg->rt_bandwidth,
208 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
209
210 for_each_possible_cpu(i) {
211 rt_rq = kzalloc_node(sizeof(struct rt_rq),
212 GFP_KERNEL, cpu_to_node(i));
213 if (!rt_rq)
214 goto err;
215
216 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
217 GFP_KERNEL, cpu_to_node(i));
218 if (!rt_se)
219 goto err_free_rq;
220
221 init_rt_rq(rt_rq);
222 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
223 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
224 }
225
226 return 1;
227
228 err_free_rq:
229 kfree(rt_rq);
230 err:
231 return 0;
232 }
233
234 #else /* CONFIG_RT_GROUP_SCHED */
235
236 #define rt_entity_is_task(rt_se) (1)
237
238 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
239 {
240 return container_of(rt_se, struct task_struct, rt);
241 }
242
243 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
244 {
245 return container_of(rt_rq, struct rq, rt);
246 }
247
248 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
249 {
250 struct task_struct *p = rt_task_of(rt_se);
251
252 return task_rq(p);
253 }
254
255 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
256 {
257 struct rq *rq = rq_of_rt_se(rt_se);
258
259 return &rq->rt;
260 }
261
262 void free_rt_sched_group(struct task_group *tg) { }
263
264 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
265 {
266 return 1;
267 }
268 #endif /* CONFIG_RT_GROUP_SCHED */
269
270 #ifdef CONFIG_SMP
271
272 static void pull_rt_task(struct rq *this_rq);
273
274 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
275 {
276 /*
277 * Try to pull RT tasks here if we lower this rq's prio and cpu is not
278 * isolated
279 */
280 return rq->rt.highest_prio.curr > prev->prio &&
281 !cpu_isolated(cpu_of(rq));
282 }
283
284 static inline int rt_overloaded(struct rq *rq)
285 {
286 return atomic_read(&rq->rd->rto_count);
287 }
288
289 static inline void rt_set_overload(struct rq *rq)
290 {
291 if (!rq->online)
292 return;
293
294 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
295 /*
296 * Make sure the mask is visible before we set
297 * the overload count. That is checked to determine
298 * if we should look at the mask. It would be a shame
299 * if we looked at the mask, but the mask was not
300 * updated yet.
301 *
302 * Matched by the barrier in pull_rt_task().
303 */
304 smp_wmb();
305 atomic_inc(&rq->rd->rto_count);
306 }
307
308 static inline void rt_clear_overload(struct rq *rq)
309 {
310 if (!rq->online)
311 return;
312
313 /* the order here really doesn't matter */
314 atomic_dec(&rq->rd->rto_count);
315 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
316 }
317
318 static void update_rt_migration(struct rt_rq *rt_rq)
319 {
320 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
321 if (!rt_rq->overloaded) {
322 rt_set_overload(rq_of_rt_rq(rt_rq));
323 rt_rq->overloaded = 1;
324 }
325 } else if (rt_rq->overloaded) {
326 rt_clear_overload(rq_of_rt_rq(rt_rq));
327 rt_rq->overloaded = 0;
328 }
329 }
330
331 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
332 {
333 struct task_struct *p;
334
335 if (!rt_entity_is_task(rt_se))
336 return;
337
338 p = rt_task_of(rt_se);
339 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
340
341 rt_rq->rt_nr_total++;
342 if (p->nr_cpus_allowed > 1)
343 rt_rq->rt_nr_migratory++;
344
345 update_rt_migration(rt_rq);
346 }
347
348 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
349 {
350 struct task_struct *p;
351
352 if (!rt_entity_is_task(rt_se))
353 return;
354
355 p = rt_task_of(rt_se);
356 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
357
358 rt_rq->rt_nr_total--;
359 if (p->nr_cpus_allowed > 1)
360 rt_rq->rt_nr_migratory--;
361
362 update_rt_migration(rt_rq);
363 }
364
365 static inline int has_pushable_tasks(struct rq *rq)
366 {
367 return !plist_head_empty(&rq->rt.pushable_tasks);
368 }
369
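/* Per-CPU callback heads used to queue deferred push/pull balancing work. */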
370 static DEFINE_PER_CPU(struct callback_head, rt_push_head);
371 static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
372
373 static void push_rt_tasks(struct rq *);
374 static void pull_rt_task(struct rq *);
375
376 static inline void rt_queue_push_tasks(struct rq *rq)
377 {
378 if (!has_pushable_tasks(rq))
379 return;
380
381 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
382 }
383
384 static inline void rt_queue_pull_task(struct rq *rq)
385 {
386 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
387 }
388
389 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
390 {
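/* Drop any stale entry first, then re-add the task at its current priority. */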
391 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
392 plist_node_init(&p->pushable_tasks, p->prio);
393 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
394
395 /* Update the highest prio pushable task */
396 if (p->prio < rq->rt.highest_prio.next)
397 rq->rt.highest_prio.next = p->prio;
398 }
399
400 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
401 {
402 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
403
404 /* Update the new highest prio pushable task */
405 if (has_pushable_tasks(rq)) {
406 p = plist_first_entry(&rq->rt.pushable_tasks,
407 struct task_struct, pushable_tasks);
408 rq->rt.highest_prio.next = p->prio;
409 } else
410 rq->rt.highest_prio.next = MAX_RT_PRIO;
411 }
412
413 #else
414
415 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
416 {
417 }
418
419 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
420 {
421 }
422
423 static inline
424 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
425 {
426 }
427
428 static inline
429 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
430 {
431 }
432
433 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
434 {
435 return false;
436 }
437
438 static inline void pull_rt_task(struct rq *this_rq)
439 {
440 }
441
442 static inline void rt_queue_push_tasks(struct rq *rq)
443 {
444 }
445 #endif /* CONFIG_SMP */
446
447 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
448 static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
449
450 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
451 {
452 return rt_se->on_rq;
453 }
454
455 #ifdef CONFIG_UCLAMP_TASK
456 /*
457 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
458 * settings.
459 *
460 * This check is only important for heterogeneous systems where the uclamp_min
461 * value is higher than the capacity of a @cpu. For non-heterogeneous systems
462 * this function will always return true.
463 *
464 * The function will return true if the capacity of the @cpu is >= the
465 * uclamp_min and false otherwise.
466 *
467 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
468 * > uclamp_max.
469 */
470 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
471 {
472 unsigned int min_cap;
473 unsigned int max_cap;
474 unsigned int cpu_cap;
475
476 /* Only heterogeneous systems can benefit from this check */
477 if (!static_branch_unlikely(&sched_asym_cpucapacity))
478 return true;
479
480 min_cap = uclamp_eff_value(p, UCLAMP_MIN);
481 max_cap = uclamp_eff_value(p, UCLAMP_MAX);
482
483 cpu_cap = capacity_orig_of(cpu);
484
485 return cpu_cap >= min(min_cap, max_cap);
486 }
487 #else
488 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
489 {
490 return true;
491 }
492 #endif
493
494 #ifdef CONFIG_RT_GROUP_SCHED
495
496 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
497 {
498 if (!rt_rq->tg)
499 return RUNTIME_INF;
500
501 return rt_rq->rt_runtime;
502 }
503
504 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
505 {
506 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
507 }
508
509 typedef struct task_group *rt_rq_iter_t;
510
511 static inline struct task_group *next_task_group(struct task_group *tg)
512 {
513 do {
514 tg = list_entry_rcu(tg->list.next,
515 typeof(struct task_group), list);
516 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
517
518 if (&tg->list == &task_groups)
519 tg = NULL;
520
521 return tg;
522 }
523
524 #define for_each_rt_rq(rt_rq, iter, rq) \
525 for (iter = container_of(&task_groups, typeof(*iter), list); \
526 (iter = next_task_group(iter)) && \
527 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
528
529 #define for_each_sched_rt_entity(rt_se) \
530 for (; rt_se; rt_se = rt_se->parent)
531
532 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
533 {
534 return rt_se->my_q;
535 }
536
537 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
538 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
539
540 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
541 {
542 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
543 struct rq *rq = rq_of_rt_rq(rt_rq);
544 struct sched_rt_entity *rt_se;
545
546 int cpu = cpu_of(rq);
547
548 rt_se = rt_rq->tg->rt_se[cpu];
549
550 if (rt_rq->rt_nr_running) {
551 if (!rt_se)
552 enqueue_top_rt_rq(rt_rq);
553 else if (!on_rt_rq(rt_se))
554 enqueue_rt_entity(rt_se, 0);
555
556 if (rt_rq->highest_prio.curr < curr->prio)
557 resched_curr(rq);
558 }
559 }
560
561 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
562 {
563 struct sched_rt_entity *rt_se;
564 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
565
566 rt_se = rt_rq->tg->rt_se[cpu];
567
568 if (!rt_se) {
569 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
570 /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
571 cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
572 }
573 else if (on_rt_rq(rt_se))
574 dequeue_rt_entity(rt_se, 0);
575 }
576
577 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
578 {
579 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
580 }
581
582 static int rt_se_boosted(struct sched_rt_entity *rt_se)
583 {
584 struct rt_rq *rt_rq = group_rt_rq(rt_se);
585 struct task_struct *p;
586
587 if (rt_rq)
588 return !!rt_rq->rt_nr_boosted;
589
590 p = rt_task_of(rt_se);
591 return p->prio != p->normal_prio;
592 }
593
594 #ifdef CONFIG_SMP
595 static inline const struct cpumask *sched_rt_period_mask(void)
596 {
597 return this_rq()->rd->span;
598 }
599 #else
600 static inline const struct cpumask *sched_rt_period_mask(void)
601 {
602 return cpu_online_mask;
603 }
604 #endif
605
606 static inline
607 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
608 {
609 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
610 }
611
612 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
613 {
614 return &rt_rq->tg->rt_bandwidth;
615 }
616
617 #else /* !CONFIG_RT_GROUP_SCHED */
618
619 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
620 {
621 return rt_rq->rt_runtime;
622 }
623
624 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
625 {
626 return ktime_to_ns(def_rt_bandwidth.rt_period);
627 }
628
629 typedef struct rt_rq *rt_rq_iter_t;
630
631 #define for_each_rt_rq(rt_rq, iter, rq) \
632 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
633
634 #define for_each_sched_rt_entity(rt_se) \
635 for (; rt_se; rt_se = NULL)
636
637 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
638 {
639 return NULL;
640 }
641
642 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
643 {
644 struct rq *rq = rq_of_rt_rq(rt_rq);
645
646 if (!rt_rq->rt_nr_running)
647 return;
648
649 enqueue_top_rt_rq(rt_rq);
650 resched_curr(rq);
651 }
652
653 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
654 {
655 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
656 }
657
658 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
659 {
660 return rt_rq->rt_throttled;
661 }
662
663 static inline const struct cpumask *sched_rt_period_mask(void)
664 {
665 return cpu_online_mask;
666 }
667
668 static inline
669 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
670 {
671 return &cpu_rq(cpu)->rt;
672 }
673
674 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
675 {
676 return &def_rt_bandwidth;
677 }
678
679 #endif /* CONFIG_RT_GROUP_SCHED */
680
681 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
682 {
683 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
684
685 return (hrtimer_active(&rt_b->rt_period_timer) ||
686 rt_rq->rt_time < rt_b->rt_runtime);
687 }
688
689 #ifdef CONFIG_SMP
690 /*
691 * We ran out of runtime, see if we can borrow some from our neighbours.
692 */
693 static void do_balance_runtime(struct rt_rq *rt_rq)
694 {
695 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
696 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
697 int i, weight;
698 u64 rt_period;
699
700 weight = cpumask_weight(rd->span);
701
702 raw_spin_lock(&rt_b->rt_runtime_lock);
703 rt_period = ktime_to_ns(rt_b->rt_period);
704 for_each_cpu(i, rd->span) {
705 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
706 s64 diff;
707
708 if (iter == rt_rq)
709 continue;
710
711 raw_spin_lock(&iter->rt_runtime_lock);
712 /*
713 * Either all rqs have inf runtime and there's nothing to steal
714 * or __disable_runtime() below sets a specific rq to inf to
715 * indicate it's been disabled and disallow stealing.
716 */
717 if (iter->rt_runtime == RUNTIME_INF)
718 goto next;
719
720 /*
721 * From runqueues with spare time, take 1/n part of their
722 * spare time, but no more than our period.
723 */
724 diff = iter->rt_runtime - iter->rt_time;
725 if (diff > 0) {
726 diff = div_u64((u64)diff, weight);
727 if (rt_rq->rt_runtime + diff > rt_period)
728 diff = rt_period - rt_rq->rt_runtime;
729 iter->rt_runtime -= diff;
730 rt_rq->rt_runtime += diff;
731 if (rt_rq->rt_runtime == rt_period) {
732 raw_spin_unlock(&iter->rt_runtime_lock);
733 break;
734 }
735 }
736 next:
737 raw_spin_unlock(&iter->rt_runtime_lock);
738 }
739 raw_spin_unlock(&rt_b->rt_runtime_lock);
740 }
741
742 /*
743 * Ensure this RQ takes back all the runtime it lent to its neighbours.
744 */
745 static void __disable_runtime(struct rq *rq)
746 {
747 struct root_domain *rd = rq->rd;
748 rt_rq_iter_t iter;
749 struct rt_rq *rt_rq;
750
751 if (unlikely(!scheduler_running))
752 return;
753
754 for_each_rt_rq(rt_rq, iter, rq) {
755 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
756 s64 want;
757 int i;
758
759 raw_spin_lock(&rt_b->rt_runtime_lock);
760 raw_spin_lock(&rt_rq->rt_runtime_lock);
761 /*
762 * Either we're all inf and nobody needs to borrow, or we're
763 * already disabled and thus have nothing to do, or we have
764 * exactly the right amount of runtime to take out.
765 */
766 if (rt_rq->rt_runtime == RUNTIME_INF ||
767 rt_rq->rt_runtime == rt_b->rt_runtime)
768 goto balanced;
769 raw_spin_unlock(&rt_rq->rt_runtime_lock);
770
771 /*
772 * Calculate the difference between what we started out with
773 * and what we currently have, that's the amount of runtime
774 * we lent and now have to reclaim.
775 */
776 want = rt_b->rt_runtime - rt_rq->rt_runtime;
777
778 /*
779 * Greedy reclaim, take back as much as we can.
780 */
781 for_each_cpu(i, rd->span) {
782 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
783 s64 diff;
784
785 /*
786 * Can't reclaim from ourselves or disabled runqueues.
787 */
788 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
789 continue;
790
791 raw_spin_lock(&iter->rt_runtime_lock);
792 if (want > 0) {
793 diff = min_t(s64, iter->rt_runtime, want);
794 iter->rt_runtime -= diff;
795 want -= diff;
796 } else {
797 iter->rt_runtime -= want;
798 want -= want;
799 }
800 raw_spin_unlock(&iter->rt_runtime_lock);
801
802 if (!want)
803 break;
804 }
805
806 raw_spin_lock(&rt_rq->rt_runtime_lock);
807 /*
808 * We cannot be left wanting - that would mean some runtime
809 * leaked out of the system.
810 */
811 BUG_ON(want);
812 balanced:
813 /*
814 * Disable all the borrow logic by pretending we have inf
815 * runtime - in which case borrowing doesn't make sense.
816 */
817 rt_rq->rt_runtime = RUNTIME_INF;
818 rt_rq->rt_throttled = 0;
819 raw_spin_unlock(&rt_rq->rt_runtime_lock);
820 raw_spin_unlock(&rt_b->rt_runtime_lock);
821
822 /* Make rt_rq available for pick_next_task() */
823 sched_rt_rq_enqueue(rt_rq);
824 }
825 }
826
827 static void __enable_runtime(struct rq *rq)
828 {
829 rt_rq_iter_t iter;
830 struct rt_rq *rt_rq;
831
832 if (unlikely(!scheduler_running))
833 return;
834
835 /*
836 * Reset each runqueue's bandwidth settings
837 */
838 for_each_rt_rq(rt_rq, iter, rq) {
839 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
840
841 raw_spin_lock(&rt_b->rt_runtime_lock);
842 raw_spin_lock(&rt_rq->rt_runtime_lock);
843 rt_rq->rt_runtime = rt_b->rt_runtime;
844 rt_rq->rt_time = 0;
845 rt_rq->rt_throttled = 0;
846 raw_spin_unlock(&rt_rq->rt_runtime_lock);
847 raw_spin_unlock(&rt_b->rt_runtime_lock);
848 }
849 }
850
851 static void balance_runtime(struct rt_rq *rt_rq)
852 {
853 if (!sched_feat(RT_RUNTIME_SHARE))
854 return;
855
856 if (rt_rq->rt_time > rt_rq->rt_runtime) {
857 raw_spin_unlock(&rt_rq->rt_runtime_lock);
858 do_balance_runtime(rt_rq);
859 raw_spin_lock(&rt_rq->rt_runtime_lock);
860 }
861 }
862 #else /* !CONFIG_SMP */
863 static inline void balance_runtime(struct rt_rq *rt_rq) {}
864 #endif /* CONFIG_SMP */
865
866 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
867 {
868 int i, idle = 1, throttled = 0;
869 const struct cpumask *span;
870
871 span = sched_rt_period_mask();
872 #ifdef CONFIG_RT_GROUP_SCHED
873 /*
874 * When the tasks in the task_group run on either isolated
875 * or non-isolated CPUs, whether they are isolcpus or
876 * were isolated via cpusets, check all the online rt_rqs
877 * lest the timer run on a CPU which does not service
878 * all runqueues, potentially leaving other CPUs indefinitely
879 * throttled.
880 */
881 span = cpu_online_mask;
882 #endif
883 for_each_cpu(i, span) {
884 int enqueue = 0;
885 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
886 struct rq *rq = rq_of_rt_rq(rt_rq);
887 int skip;
888
889 /*
890 * When span == cpu_online_mask, taking each rq->lock
891 * can be time-consuming. Try to avoid it when possible.
892 */
893 raw_spin_lock(&rt_rq->rt_runtime_lock);
894 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
895 rt_rq->rt_runtime = rt_b->rt_runtime;
896 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
897 raw_spin_unlock(&rt_rq->rt_runtime_lock);
898 if (skip)
899 continue;
900
901 raw_spin_lock(&rq->lock);
902 update_rq_clock(rq);
903
904 if (rt_rq->rt_time) {
905 u64 runtime;
906
907 raw_spin_lock(&rt_rq->rt_runtime_lock);
908 if (rt_rq->rt_throttled)
909 balance_runtime(rt_rq);
910 runtime = rt_rq->rt_runtime;
911 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
912 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
913 rt_rq->rt_throttled = 0;
914 enqueue = 1;
915
916 /*
917 * When we're idle and a woken (rt) task is
918 * throttled check_preempt_curr() will set
919 * skip_update and the time between the wakeup
920 * and this unthrottle will get accounted as
921 * 'runtime'.
922 */
923 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
924 rq_clock_cancel_skipupdate(rq);
925 }
926 if (rt_rq->rt_time || rt_rq->rt_nr_running)
927 idle = 0;
928 raw_spin_unlock(&rt_rq->rt_runtime_lock);
929 } else if (rt_rq->rt_nr_running) {
930 idle = 0;
931 if (!rt_rq_throttled(rt_rq))
932 enqueue = 1;
933 }
934 if (rt_rq->rt_throttled)
935 throttled = 1;
936
937 if (enqueue)
938 sched_rt_rq_enqueue(rt_rq);
939 raw_spin_unlock(&rq->lock);
940 }
941
942 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
943 return 1;
944
945 return idle;
946 }
947
948 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
949 {
950 #ifdef CONFIG_RT_GROUP_SCHED
951 struct rt_rq *rt_rq = group_rt_rq(rt_se);
952
953 if (rt_rq)
954 return rt_rq->highest_prio.curr;
955 #endif
956
957 return rt_task_of(rt_se)->prio;
958 }
959
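/* Arm the bandwidth period timer if it is not already active. */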
960 static inline void try_start_rt_bandwidth(struct rt_bandwidth *rt_b)
961 {
962 raw_spin_lock(&rt_b->rt_runtime_lock);
963 if (!rt_b->rt_period_active) {
964 rt_b->rt_period_active = 1;
965 hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period);
966 hrtimer_start_expires(&rt_b->rt_period_timer,
967 HRTIMER_MODE_ABS_PINNED_HARD);
968 }
969 raw_spin_unlock(&rt_b->rt_runtime_lock);
970 }
971
972 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
973 {
974 u64 runtime = sched_rt_runtime(rt_rq);
975
976 if (rt_rq->rt_throttled)
977 return rt_rq_throttled(rt_rq);
978
979 if (runtime >= sched_rt_period(rt_rq))
980 return 0;
981
982 balance_runtime(rt_rq);
983 runtime = sched_rt_runtime(rt_rq);
984 if (runtime == RUNTIME_INF)
985 return 0;
986
987 if (rt_rq->rt_time > runtime) {
988 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
989
990 /*
991 * Don't actually throttle groups that have no runtime assigned
992 * but accrue some time due to boosting.
993 */
994 if (likely(rt_b->rt_runtime)) {
995 rt_rq->rt_throttled = 1;
996 printk_deferred_once("sched: RT throttling activated\n");
997 } else {
998 /*
999 * In case we did anyway, make it go away,
1000 * replenishment is a joke, since it will replenish us
1001 * with exactly 0 ns.
1002 */
1003 rt_rq->rt_time = 0;
1004 }
1005
1006 if (rt_rq_throttled(rt_rq)) {
1007 sched_rt_rq_dequeue(rt_rq);
1008 return 1;
1009 }
1010 }
1011
1012 return 0;
1013 }
1014
1015 /*
1016 * Update the current task's runtime statistics. Skip current tasks that
1017 * are not in our scheduling class.
1018 */
1019 static void update_curr_rt(struct rq *rq)
1020 {
1021 struct task_struct *curr = rq->curr;
1022 struct sched_rt_entity *rt_se = &curr->rt;
1023 u64 delta_exec;
1024 u64 now;
1025
1026 if (curr->sched_class != &rt_sched_class)
1027 return;
1028
1029 now = rq_clock_task(rq);
1030 delta_exec = now - curr->se.exec_start;
1031 if (unlikely((s64)delta_exec <= 0))
1032 return;
1033
1034 schedstat_set(curr->se.statistics.exec_max,
1035 max(curr->se.statistics.exec_max, delta_exec));
1036
1037 curr->se.sum_exec_runtime += delta_exec;
1038 account_group_exec_runtime(curr, delta_exec);
1039
1040 curr->se.exec_start = now;
1041 cgroup_account_cputime(curr, delta_exec);
1042
1043 if (!rt_bandwidth_enabled())
1044 return;
1045
1046 for_each_sched_rt_entity(rt_se) {
1047 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1048 int exceeded;
1049
1050 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
1051 raw_spin_lock(&rt_rq->rt_runtime_lock);
1052 rt_rq->rt_time += delta_exec;
1053 exceeded = sched_rt_runtime_exceeded(rt_rq);
1054 if (exceeded)
1055 resched_curr(rq);
1056 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1057 if (exceeded)
1058 try_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
1059 }
1060 }
1061 }
1062
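/* Remove the root rt_rq's tasks from the rq's nr_running accounting, e.g. when the RT class gets throttled. */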
1063 static void
1064 dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
1065 {
1066 struct rq *rq = rq_of_rt_rq(rt_rq);
1067
1068 BUG_ON(&rq->rt != rt_rq);
1069
1070 if (!rt_rq->rt_queued)
1071 return;
1072
1073 BUG_ON(!rq->nr_running);
1074
1075 sub_nr_running(rq, count);
1076 rt_rq->rt_queued = 0;
1077
1078 }
1079
1080 static void
1081 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1082 {
1083 struct rq *rq = rq_of_rt_rq(rt_rq);
1084
1085 BUG_ON(&rq->rt != rt_rq);
1086
1087 if (rt_rq->rt_queued)
1088 return;
1089
1090 if (rt_rq_throttled(rt_rq))
1091 return;
1092
1093 if (rt_rq->rt_nr_running) {
1094 add_nr_running(rq, rt_rq->rt_nr_running);
1095 rt_rq->rt_queued = 1;
1096 }
1097
1098 /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
1099 cpufreq_update_util(rq, 0);
1100 }
1101
1102 #if defined CONFIG_SMP
1103
1104 static void
1105 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1106 {
1107 struct rq *rq = rq_of_rt_rq(rt_rq);
1108
1109 #ifdef CONFIG_RT_GROUP_SCHED
1110 /*
1111 * Change rq's cpupri only if rt_rq is the top queue.
1112 */
1113 if (&rq->rt != rt_rq)
1114 return;
1115 #endif
1116 if (rq->online && prio < prev_prio)
1117 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1118 }
1119
1120 static void
1121 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1122 {
1123 struct rq *rq = rq_of_rt_rq(rt_rq);
1124
1125 #ifdef CONFIG_RT_GROUP_SCHED
1126 /*
1127 * Change rq's cpupri only if rt_rq is the top queue.
1128 */
1129 if (&rq->rt != rt_rq)
1130 return;
1131 #endif
1132 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1133 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1134 }
1135
1136 #else /* CONFIG_SMP */
1137
1138 static inline
1139 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1140 static inline
1141 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1142
1143 #endif /* CONFIG_SMP */
1144
1145 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1146 static void
1147 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1148 {
1149 int prev_prio = rt_rq->highest_prio.curr;
1150
1151 if (prio < prev_prio)
1152 rt_rq->highest_prio.curr = prio;
1153
1154 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1155 }
1156
1157 static void
1158 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1159 {
1160 int prev_prio = rt_rq->highest_prio.curr;
1161
1162 if (rt_rq->rt_nr_running) {
1163
1164 WARN_ON(prio < prev_prio);
1165
1166 /*
1167 * This may have been our highest task, and therefore
1168 * we may have some recomputation to do
1169 */
1170 if (prio == prev_prio) {
1171 struct rt_prio_array *array = &rt_rq->active;
1172
1173 rt_rq->highest_prio.curr =
1174 sched_find_first_bit(array->bitmap);
1175 }
1176
1177 } else
1178 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1179
1180 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1181 }
1182
1183 #else
1184
1185 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1186 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1187
1188 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1189
1190 #ifdef CONFIG_RT_GROUP_SCHED
1191
1192 static void
1193 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1194 {
1195 if (rt_se_boosted(rt_se))
1196 rt_rq->rt_nr_boosted++;
1197
1198 if (rt_rq->tg)
1199 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1200 }
1201
1202 static void
1203 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1204 {
1205 if (rt_se_boosted(rt_se))
1206 rt_rq->rt_nr_boosted--;
1207
1208 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1209 }
1210
1211 #else /* CONFIG_RT_GROUP_SCHED */
1212
1213 static void
1214 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1215 {
1216 start_rt_bandwidth(&def_rt_bandwidth);
1217 }
1218
1219 static inline
1220 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1221
1222 #endif /* CONFIG_RT_GROUP_SCHED */
1223
1224 static inline
1225 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1226 {
1227 struct rt_rq *group_rq = group_rt_rq(rt_se);
1228
1229 if (group_rq)
1230 return group_rq->rt_nr_running;
1231 else
1232 return 1;
1233 }
1234
1235 static inline
1236 unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1237 {
1238 struct rt_rq *group_rq = group_rt_rq(rt_se);
1239 struct task_struct *tsk;
1240
1241 if (group_rq)
1242 return group_rq->rr_nr_running;
1243
1244 tsk = rt_task_of(rt_se);
1245
1246 return (tsk->policy == SCHED_RR) ? 1 : 0;
1247 }
1248
1249 static inline
1250 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1251 {
1252 int prio = rt_se_prio(rt_se);
1253
1254 WARN_ON(!rt_prio(prio));
1255 rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1256 rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1257
1258 inc_rt_prio(rt_rq, prio);
1259 inc_rt_migration(rt_se, rt_rq);
1260 inc_rt_group(rt_se, rt_rq);
1261 }
1262
1263 static inline
1264 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1265 {
1266 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1267 WARN_ON(!rt_rq->rt_nr_running);
1268 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1269 rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1270
1271 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1272 dec_rt_migration(rt_se, rt_rq);
1273 dec_rt_group(rt_se, rt_rq);
1274 }
1275
1276 /*
1277 * Change rt_se->run_list location unless SAVE && !MOVE
1278 *
1279 * assumes ENQUEUE/DEQUEUE flags match
1280 */
1281 static inline bool move_entity(unsigned int flags)
1282 {
1283 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1284 return false;
1285
1286 return true;
1287 }
1288
1289 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1290 {
1291 list_del_init(&rt_se->run_list);
1292
1293 if (list_empty(array->queue + rt_se_prio(rt_se)))
1294 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1295
1296 rt_se->on_list = 0;
1297 }
1298
1299 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1300 {
1301 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1302 struct rt_prio_array *array = &rt_rq->active;
1303 struct rt_rq *group_rq = group_rt_rq(rt_se);
1304 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1305
1306 /*
1307 * Don't enqueue the group if it's throttled, or when empty.
1308 * The latter is a consequence of the former when a child group
1309 * gets throttled and the current group doesn't have any other
1310 * active members.
1311 */
1312 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1313 if (rt_se->on_list)
1314 __delist_rt_entity(rt_se, array);
1315 return;
1316 }
1317
1318 if (move_entity(flags)) {
1319 WARN_ON_ONCE(rt_se->on_list);
1320 if (flags & ENQUEUE_HEAD)
1321 list_add(&rt_se->run_list, queue);
1322 else
1323 list_add_tail(&rt_se->run_list, queue);
1324
1325 __set_bit(rt_se_prio(rt_se), array->bitmap);
1326 rt_se->on_list = 1;
1327 }
1328 rt_se->on_rq = 1;
1329
1330 inc_rt_tasks(rt_se, rt_rq);
1331 }
1332
1333 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1334 {
1335 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1336 struct rt_prio_array *array = &rt_rq->active;
1337
1338 if (move_entity(flags)) {
1339 WARN_ON_ONCE(!rt_se->on_list);
1340 __delist_rt_entity(rt_se, array);
1341 }
1342 rt_se->on_rq = 0;
1343
1344 dec_rt_tasks(rt_se, rt_rq);
1345 }
1346
1347 /*
1348 * Because the prio of an upper entry depends on the lower
1349 * entries, we must remove entries top - down.
1350 */
1351 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1352 {
1353 struct sched_rt_entity *back = NULL;
1354 unsigned int rt_nr_running;
1355
1356 for_each_sched_rt_entity(rt_se) {
1357 rt_se->back = back;
1358 back = rt_se;
1359 }
1360
1361 rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
1362
1363 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1364 if (on_rt_rq(rt_se))
1365 __dequeue_rt_entity(rt_se, flags);
1366 }
1367
1368 dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
1369 }
1370
1371 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1372 {
1373 struct rq *rq = rq_of_rt_se(rt_se);
1374
1375 dequeue_rt_stack(rt_se, flags);
1376 for_each_sched_rt_entity(rt_se)
1377 __enqueue_rt_entity(rt_se, flags);
1378 enqueue_top_rt_rq(&rq->rt);
1379 }
1380
1381 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1382 {
1383 struct rq *rq = rq_of_rt_se(rt_se);
1384
1385 dequeue_rt_stack(rt_se, flags);
1386
1387 for_each_sched_rt_entity(rt_se) {
1388 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1389
1390 if (rt_rq && rt_rq->rt_nr_running)
1391 __enqueue_rt_entity(rt_se, flags);
1392 }
1393 enqueue_top_rt_rq(&rq->rt);
1394 }
1395
1396 /*
1397 * Adding/removing a task to/from a priority array:
1398 */
1399 static void
1400 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1401 {
1402 struct sched_rt_entity *rt_se = &p->rt;
1403
1404 if (flags & ENQUEUE_WAKEUP)
1405 rt_se->timeout = 0;
1406
1407 enqueue_rt_entity(rt_se, flags);
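/* WALT (Window Assisted Load Tracking) accounting hook in this vendor tree. */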
1408 walt_inc_cumulative_runnable_avg(rq, p);
1409
1410 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1411 enqueue_pushable_task(rq, p);
1412 }
1413
1414 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1415 {
1416 struct sched_rt_entity *rt_se = &p->rt;
1417
1418 update_curr_rt(rq);
1419 dequeue_rt_entity(rt_se, flags);
1420 walt_dec_cumulative_runnable_avg(rq, p);
1421
1422 dequeue_pushable_task(rq, p);
1423 }
1424
1425 /*
1426 * Put the task at the head or the end of the run list without the overhead of
1427 * dequeue followed by enqueue.
1428 */
1429 static void
1430 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1431 {
1432 if (on_rt_rq(rt_se)) {
1433 struct rt_prio_array *array = &rt_rq->active;
1434 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1435
1436 if (head)
1437 list_move(&rt_se->run_list, queue);
1438 else
1439 list_move_tail(&rt_se->run_list, queue);
1440 }
1441 }
1442
1443 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1444 {
1445 struct sched_rt_entity *rt_se = &p->rt;
1446 struct rt_rq *rt_rq;
1447
1448 for_each_sched_rt_entity(rt_se) {
1449 rt_rq = rt_rq_of_se(rt_se);
1450 requeue_rt_entity(rt_rq, rt_se, head);
1451 }
1452 }
1453
1454 static void yield_task_rt(struct rq *rq)
1455 {
1456 requeue_task_rt(rq, rq->curr, 0);
1457 }
1458
1459 #ifdef CONFIG_SMP
1460 static int find_lowest_rq(struct task_struct *task);
1461
1462 static int
1463 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1464 {
1465 struct task_struct *curr;
1466 struct rq *rq;
1467 bool test;
1468
1469 /* For anything but wake ups, just return the task_cpu */
1470 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1471 goto out;
1472
1473 rq = cpu_rq(cpu);
1474
1475 rcu_read_lock();
1476 curr = READ_ONCE(rq->curr); /* unlocked access */
1477
1478 /*
1479 * If the current task on @p's runqueue is an RT task, then
1480 * try to see if we can wake this RT task up on another
1481 * runqueue. Otherwise simply start this RT task
1482 * on its current runqueue.
1483 *
1484 * We want to avoid overloading runqueues. If the woken
1485 * task is a higher priority, then it will stay on this CPU
1486 * and the lower prio task should be moved to another CPU.
1487 * Even though this will probably make the lower prio task
1488 * lose its cache, we do not want to bounce a higher task
1489 * around just because it gave up its CPU, perhaps for a
1490 * lock?
1491 *
1492 * For equal prio tasks, we just let the scheduler sort it out.
1493 *
1494 * Otherwise, just let it ride on the affined RQ and the
1495 * post-schedule router will push the preempted task away
1496 *
1497 * This test is optimistic, if we get it wrong the load-balancer
1498 * will have to sort it out.
1499 *
1500 * We take into account the capacity of the CPU to ensure it fits the
1501 * requirement of the task - which is only important on heterogeneous
1502 * systems like big.LITTLE.
1503 */
1504 test = curr &&
1505 unlikely(rt_task(curr)) &&
1506 (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
1507 #ifdef CONFIG_SCHED_RT_CAS
1508 test |= sysctl_sched_enable_rt_cas;
1509 #endif
1510
1511 if (test || !rt_task_fits_capacity(p, cpu)) {
1512 int target = find_lowest_rq(p);
1513
1514 /*
1515 * Bail out if we were forcing a migration to find a better
1516 * fitting CPU but our search failed.
1517 */
1518 if (!test && target != -1 && !rt_task_fits_capacity(p, target))
1519 goto out_unlock;
1520
1521 /*
1522 * Don't bother moving it if the destination CPU is
1523 * not running a lower priority task.
1524 */
1525 if (target != -1 && (
1526 #ifdef CONFIG_SCHED_RT_CAS
1527 sysctl_sched_enable_rt_cas ||
1528 #endif
1529 p->prio < cpu_rq(target)->rt.highest_prio.curr))
1530 cpu = target;
1531 }
1532
1533 out_unlock:
1534 rcu_read_unlock();
1535
1536 out:
1537 return cpu;
1538 }
1539
1540 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1541 {
1542 /*
1543 * Current can't be migrated, useless to reschedule,
1544 * let's hope p can move out.
1545 */
1546 if (rq->curr->nr_cpus_allowed == 1 ||
1547 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1548 return;
1549
1550 /*
1551 * p is migratable, so let's not schedule it and
1552 * see if it is pushed or pulled somewhere else.
1553 */
1554 if (p->nr_cpus_allowed != 1 &&
1555 cpupri_find(&rq->rd->cpupri, p, NULL))
1556 return;
1557
1558 /*
1559 * There appear to be other CPUs that can accept
1560 * the current task but none can run 'p', so let's reschedule
1561 * to try and push the current task away:
1562 */
1563 requeue_task_rt(rq, p, 1);
1564 resched_curr(rq);
1565 }
1566
1567 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1568 {
1569 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1570 /*
1571 * This is OK, because current is on_cpu, which avoids it being
1572 * picked for load-balance and preemption/IRQs are still
1573 * disabled avoiding further scheduler activity on it and we've
1574 * not yet started the picking loop.
1575 */
1576 rq_unpin_lock(rq, rf);
1577 pull_rt_task(rq);
1578 rq_repin_lock(rq, rf);
1579 }
1580
1581 return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
1582 }
1583 #endif /* CONFIG_SMP */
1584
1585 /*
1586 * Preempt the current task with a newly woken task if needed:
1587 */
1588 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1589 {
1590 if (p->prio < rq->curr->prio) {
1591 resched_curr(rq);
1592 return;
1593 }
1594
1595 #ifdef CONFIG_SMP
1596 /*
1597 * If:
1598 *
1599 * - the newly woken task is of equal priority to the current task
1600 * - the newly woken task is non-migratable while current is migratable
1601 * - current will be preempted on the next reschedule
1602 *
1603 * we should check to see if current can readily move to a different
1604 * cpu. If so, we will reschedule to allow the push logic to try
1605 * to move current somewhere else, making room for our non-migratable
1606 * task.
1607 */
1608 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1609 check_preempt_equal_prio(rq, p);
1610 #endif
1611 }
1612
1613 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
1614 {
1615 p->se.exec_start = rq_clock_task(rq);
1616
1617 /* The running task is never eligible for pushing */
1618 dequeue_pushable_task(rq, p);
1619
1620 if (!first)
1621 return;
1622
1623 /*
1624 * If prev task was rt, put_prev_task() has already updated the
1625 * utilization. We only care about the case where we start to schedule an
1626 * rt task.
1627 */
1628 if (rq->curr->sched_class != &rt_sched_class)
1629 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1630
1631 rt_queue_push_tasks(rq);
1632 }
1633
1634 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1635 struct rt_rq *rt_rq)
1636 {
1637 struct rt_prio_array *array = &rt_rq->active;
1638 struct sched_rt_entity *next = NULL;
1639 struct list_head *queue;
1640 int idx;
1641
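/* The delimiter bit set at MAX_RT_PRIO by init_rt_rq() guarantees the bitmap search terminates. */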
1642 idx = sched_find_first_bit(array->bitmap);
1643 BUG_ON(idx >= MAX_RT_PRIO);
1644
1645 queue = array->queue + idx;
1646 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1647
1648 return next;
1649 }
1650
1651 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1652 {
1653 struct sched_rt_entity *rt_se;
1654 struct rt_rq *rt_rq = &rq->rt;
1655
1656 do {
1657 rt_se = pick_next_rt_entity(rq, rt_rq);
1658 BUG_ON(!rt_se);
1659 rt_rq = group_rt_rq(rt_se);
1660 } while (rt_rq);
1661
1662 return rt_task_of(rt_se);
1663 }
1664
1665 static struct task_struct *pick_next_task_rt(struct rq *rq)
1666 {
1667 struct task_struct *p;
1668
1669 if (!sched_rt_runnable(rq))
1670 return NULL;
1671
1672 p = _pick_next_task_rt(rq);
1673 set_next_task_rt(rq, p, true);
1674 return p;
1675 }
1676
1677 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1678 {
1679 update_curr_rt(rq);
1680
1681 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1682
1683 /*
1684 * The previous task needs to be made eligible for pushing
1685 * if it is still active
1686 */
1687 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1688 enqueue_pushable_task(rq, p);
1689 }
1690
1691 #ifdef CONFIG_SMP
1692
1693 /* Only try algorithms three times */
1694 #define RT_MAX_TRIES 3
1695
1696 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1697 {
1698 if (!task_running(rq, p) &&
1699 cpumask_test_cpu(cpu, p->cpus_ptr))
1700 return 1;
1701
1702 return 0;
1703 }
1704
1705 /*
1706 * Return the highest pushable rq's task, which is suitable to be executed
1707 * on the CPU, NULL otherwise
1708 */
1709 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1710 {
1711 struct plist_head *head = &rq->rt.pushable_tasks;
1712 struct task_struct *p;
1713
1714 if (!has_pushable_tasks(rq))
1715 return NULL;
1716
1717 plist_for_each_entry(p, head, pushable_tasks) {
1718 if (pick_rt_task(rq, p, cpu))
1719 return p;
1720 }
1721
1722 return NULL;
1723 }
1724
1725 #ifdef CONFIG_SCHED_RT_CAS
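/*
 * Capacity-aware CPU selection for RT tasks: prefer the smallest-capacity
 * sched_group whose CPUs can serve the task, then pick the least-loaded
 * allowed CPU within it, falling back to the rest of lowest_mask if needed.
 */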
1726 static int find_cas_cpu(struct sched_domain *sd,
1727 struct task_struct *task, struct cpumask *lowest_mask)
1728 {
1729 struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
1730 struct sched_group *sg = NULL;
1731 struct sched_group *sg_target = NULL;
1732 struct sched_group *sg_backup = NULL;
1733 struct cpumask search_cpu, backup_search_cpu;
1734 int cpu = -1;
1735 int target_cpu = -1;
1736 unsigned long cpu_capacity;
1737 unsigned long boosted_tutil = uclamp_task_util(task);
1738 unsigned long target_capacity = ULONG_MAX;
1739 unsigned long util;
1740 unsigned long target_cpu_util = ULONG_MAX;
1741 int prev_cpu = task_cpu(task);
1742 #ifdef CONFIG_SCHED_RTG
1743 struct cpumask *rtg_target = NULL;
1744 #endif
1745 bool boosted = uclamp_boosted(task);
1746
1747 if (!sysctl_sched_enable_rt_cas)
1748 return -1;
1749
1750 rcu_read_lock();
1751
1752 #ifdef CONFIG_SCHED_RTG
1753 rtg_target = find_rtg_target(task);
1754 #endif
1755
1756 sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, 0));
1757 if (!sd) {
1758 rcu_read_unlock();
1759 return -1;
1760 }
1761
1762 sg = sd->groups;
1763 do {
1764 if (!cpumask_intersects(lowest_mask, sched_group_span(sg)))
1765 continue;
1766
1767 if (boosted) {
1768 if (cpumask_test_cpu(rd->max_cap_orig_cpu,
1769 sched_group_span(sg))) {
1770 sg_target = sg;
1771 break;
1772 }
1773 }
1774
1775 cpu = group_first_cpu(sg);
1776 #ifdef CONFIG_SCHED_RTG
1777 /* honor the rtg tasks */
1778 if (rtg_target) {
1779 if (cpumask_test_cpu(cpu, rtg_target)) {
1780 sg_target = sg;
1781 break;
1782 }
1783
1784 /* active LB or big_task favor cpus with more capacity */
1785 if (task->state == TASK_RUNNING || boosted) {
1786 if (capacity_orig_of(cpu) >
1787 capacity_orig_of(cpumask_any(rtg_target))) {
1788 sg_target = sg;
1789 break;
1790 }
1791
1792 sg_backup = sg;
1793 continue;
1794 }
1795 }
1796 #endif
1797 /*
1798 * 1. add margin to support task migration
1799 * 2. if task_util is higher than all cpus' capacity, make sure the
1800 * sg_backup with the most powerful cpus is selected
1801 */
1802 if (!rt_task_fits_capacity(task, cpu)) {
1803 sg_backup = sg;
1804 continue;
1805 }
1806
1807 /* support task boost */
1808 cpu_capacity = capacity_orig_of(cpu);
1809 if (boosted_tutil > cpu_capacity) {
1810 sg_backup = sg;
1811 continue;
1812 }
1813
1814 /* sg_target: select the sg with smaller capacity */
1815 if (cpu_capacity < target_capacity) {
1816 target_capacity = cpu_capacity;
1817 sg_target = sg;
1818 }
1819 } while (sg = sg->next, sg != sd->groups);
1820
1821 if (!sg_target)
1822 sg_target = sg_backup;
1823
1824 if (sg_target) {
1825 cpumask_and(&search_cpu, lowest_mask, sched_group_span(sg_target));
1826 cpumask_copy(&backup_search_cpu, lowest_mask);
1827 cpumask_andnot(&backup_search_cpu, &backup_search_cpu, &search_cpu);
1828 } else {
1829 cpumask_copy(&search_cpu, lowest_mask);
1830 cpumask_clear(&backup_search_cpu);
1831 }
1832
1833 retry:
1834 cpu = cpumask_first(&search_cpu);
1835 do {
1836 trace_sched_find_cas_cpu_each(task, cpu, target_cpu,
1837 cpu_isolated(cpu),
1838 idle_cpu(cpu), boosted_tutil, cpu_util(cpu),
1839 capacity_orig_of(cpu));
1840
1841 if (cpu_isolated(cpu))
1842 continue;
1843
1844 if (!cpumask_test_cpu(cpu, task->cpus_ptr))
1845 continue;
1846
1847 /* find best cpu with smallest max_capacity */
1848 if (target_cpu != -1 &&
1849 capacity_orig_of(cpu) > capacity_orig_of(target_cpu))
1850 continue;
1851
1852 util = cpu_util(cpu);
1853
1854 /* Find the least loaded CPU */
1855 if (util > target_cpu_util)
1856 continue;
1857
1858 /*
1859 * If the previous CPU has the same load, keep it as
1860 * target_cpu
1861 */
1862 if (target_cpu_util == util && target_cpu == prev_cpu)
1863 continue;
1864
1865 /*
1866 * If the candidate CPU is the previous CPU, select it.
1867 * If all above conditions are the same, select the least
1868 * cumulative window demand CPU.
1869 */
1870 target_cpu_util = util;
1871 target_cpu = cpu;
1872 } while ((cpu = cpumask_next(cpu, &search_cpu)) < nr_cpu_ids);
1873
1874 if (target_cpu != -1 && cpumask_test_cpu(target_cpu, lowest_mask)) {
1875 goto done;
1876 } else if (!cpumask_empty(&backup_search_cpu)) {
1877 cpumask_copy(&search_cpu, &backup_search_cpu);
1878 cpumask_clear(&backup_search_cpu);
1879 goto retry;
1880 }
1881
1882 done:
1883 trace_sched_find_cas_cpu(task, lowest_mask, boosted_tutil, prev_cpu, target_cpu);
1884 rcu_read_unlock();
1885 return target_cpu;
1886 }
1887 #endif
1888
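/* Per-CPU scratch cpumask for find_lowest_rq(); may still be unallocated very early in boot. */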
1889 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1890
1891 static int find_lowest_rq(struct task_struct *task)
1892 {
1893 struct sched_domain *sd;
1894 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1895 int this_cpu = smp_processor_id();
1896 int cpu = task_cpu(task);
1897 int ret;
1898 #ifdef CONFIG_SCHED_RT_CAS
1899 int cas_cpu;
1900 #endif
1901
1902 /* Make sure the mask is initialized first */
1903 if (unlikely(!lowest_mask))
1904 return -1;
1905
1906 if (task->nr_cpus_allowed == 1)
1907 return -1; /* No other targets possible */
1908
1909 /*
1910 * If we're on asym system ensure we consider the different capacities
1911 * of the CPUs when searching for the lowest_mask.
1912 */
1913 if (static_branch_unlikely(&sched_asym_cpucapacity)) {
1914
1915 ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
1916 task, lowest_mask,
1917 rt_task_fits_capacity);
1918 } else {
1919
1920 ret = cpupri_find(&task_rq(task)->rd->cpupri,
1921 task, lowest_mask);
1922 }
1923
1924 if (!ret)
1925 return -1; /* No targets found */
1926
1927 #ifdef CONFIG_SCHED_RT_CAS
1928 cas_cpu = find_cas_cpu(sd, task, lowest_mask);
1929 if (cas_cpu != -1)
1930 return cas_cpu;
1931 #endif
1932
1933 /*
1934 * At this point we have built a mask of CPUs representing the
1935 * lowest priority tasks in the system. Now we want to elect
1936 * the best one based on our affinity and topology.
1937 *
1938 * We prioritize the last CPU that the task executed on since
1939 * it is most likely cache-hot in that location.
1940 */
1941 if (cpumask_test_cpu(cpu, lowest_mask))
1942 return cpu;
1943
1944 /*
1945 * Otherwise, we consult the sched_domains span maps to figure
1946 * out which CPU is logically closest to our hot cache data.
1947 */
1948 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1949 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1950
1951 rcu_read_lock();
1952 for_each_domain(cpu, sd) {
1953 if (sd->flags & SD_WAKE_AFFINE) {
1954 int best_cpu;
1955
1956 /*
1957 * "this_cpu" is cheaper to preempt than a
1958 * remote processor.
1959 */
1960 if (this_cpu != -1 &&
1961 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1962 rcu_read_unlock();
1963 return this_cpu;
1964 }
1965
1966 best_cpu = cpumask_first_and(lowest_mask,
1967 sched_domain_span(sd));
1968 if (best_cpu < nr_cpu_ids) {
1969 rcu_read_unlock();
1970 return best_cpu;
1971 }
1972 }
1973 }
1974 rcu_read_unlock();
1975
1976 /*
1977 * And finally, if there were no matches within the domains
1978 * just give the caller *something* to work with from the compatible
1979 * locations.
1980 */
1981 if (this_cpu != -1)
1982 return this_cpu;
1983
1984 cpu = cpumask_any(lowest_mask);
1985 if (cpu < nr_cpu_ids)
1986 return cpu;
1987
1988 return -1;
1989 }
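/*
 * The selection order implemented above, in decreasing preference:
 *   1. a CAS-selected CPU (CONFIG_SCHED_RT_CAS only),
 *   2. the task's previous CPU if it is in lowest_mask (likely cache-hot),
 *   3. this_cpu, when it is in lowest_mask and shares a SD_WAKE_AFFINE
 *      domain with the previous CPU,
 *   4. the first lowest_mask CPU inside such a domain,
 *   5. this_cpu, then any CPU from lowest_mask, as last resorts.
 */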
1990
1991 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1992 {
1993 struct task_struct *p;
1994
1995 if (!has_pushable_tasks(rq))
1996 return NULL;
1997
1998 p = plist_first_entry(&rq->rt.pushable_tasks,
1999 struct task_struct, pushable_tasks);
2000
2001 BUG_ON(rq->cpu != task_cpu(p));
2002 BUG_ON(task_current(rq, p));
2003 BUG_ON(p->nr_cpus_allowed <= 1);
2004
2005 BUG_ON(!task_on_rq_queued(p));
2006 BUG_ON(!rt_task(p));
2007
2008 return p;
2009 }
2010
2011 /* Will lock the rq it finds */
2012 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
2013 {
2014 struct rq *lowest_rq = NULL;
2015 int tries;
2016 int cpu;
2017
2018 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
2019 cpu = find_lowest_rq(task);
2020
2021 if ((cpu == -1) || (cpu == rq->cpu))
2022 break;
2023
2024 lowest_rq = cpu_rq(cpu);
2025
2026 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
2027 /*
2028 * Target rq has tasks of equal or higher priority than ours;
2029 * retrying does not release any lock and is unlikely
2030 * to yield a different result.
2031 */
2032 lowest_rq = NULL;
2033 break;
2034 }
2035
2036 /* if the prio of this runqueue changed, try again */
2037 if (double_lock_balance(rq, lowest_rq)) {
2038 /*
2039 * We had to unlock the run queue. In
2040 * the mean time, task could have
2041 * migrated already or had its affinity changed.
2042 */
2043 struct task_struct *next_task = pick_next_pushable_task(rq);
2044 if (unlikely(next_task != task ||
2045 !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr))) {
2046 double_unlock_balance(rq, lowest_rq);
2047 lowest_rq = NULL;
2048 break;
2049 }
2050 }
2051
2052 /* If this rq is still suitable use it. */
2053 if (lowest_rq->rt.highest_prio.curr > task->prio)
2054 break;
2055
2056 /* try again */
2057 double_unlock_balance(rq, lowest_rq);
2058 lowest_rq = NULL;
2059 }
2060
2061 return lowest_rq;
2062 }
2063
2064 /*
2065 * If the current CPU has more than one RT task, see if the non
2066 * running task can migrate over to a CPU that is running a task
2067 * of lesser priority.
2068 */
2069 static int push_rt_task(struct rq *rq)
2070 {
2071 struct task_struct *next_task;
2072 struct rq *lowest_rq;
2073 int ret = 0;
2074
2075 if (!rq->rt.overloaded)
2076 return 0;
2077
2078 next_task = pick_next_pushable_task(rq);
2079 if (!next_task)
2080 return 0;
2081
2082 retry:
2083 if (WARN_ON(next_task == rq->curr))
2084 return 0;
2085
2086 /*
2087 * It's possible that the next_task slipped in with a
2088 * higher priority than current. If that's the case
2089 * just reschedule current.
2090 */
2091 if (unlikely(next_task->prio < rq->curr->prio)) {
2092 resched_curr(rq);
2093 return 0;
2094 }
2095
2096 /* We might release rq lock */
2097 get_task_struct(next_task);
2098
2099 /* find_lock_lowest_rq locks the rq if found */
2100 lowest_rq = find_lock_lowest_rq(next_task, rq);
2101 if (!lowest_rq) {
2102 struct task_struct *task;
2103 /*
2104 * find_lock_lowest_rq releases rq->lock
2105 * so it is possible that next_task has migrated.
2106 *
2107 * We need to make sure that the task is still on the same
2108 * run-queue and is also still the next task eligible for
2109 * pushing.
2110 */
2111 task = pick_next_pushable_task(rq);
2112 if (task == next_task) {
2113 /*
2114 * The task hasn't migrated, and is still the next
2115 * eligible task, but we failed to find a run-queue
2116 * to push it to. Do not retry in this case, since
2117 * other CPUs will pull from us when ready.
2118 */
2119 goto out;
2120 }
2121
2122 if (!task)
2123 /* No more tasks, just exit */
2124 goto out;
2125
2126 /*
2127 * Something has shifted, try again.
2128 */
2129 put_task_struct(next_task);
2130 next_task = task;
2131 goto retry;
2132 }
2133
2134 deactivate_task(rq, next_task, 0);
2135 set_task_cpu(next_task, lowest_rq->cpu);
2136 activate_task(lowest_rq, next_task, 0);
2137 ret = 1;
2138
2139 resched_curr(lowest_rq);
2140
2141 double_unlock_balance(rq, lowest_rq);
2142
2143 out:
2144 put_task_struct(next_task);
2145
2146 return ret;
2147 }
2148
2149 static void push_rt_tasks(struct rq *rq)
2150 {
2151 /* push_rt_task will return true if it moved an RT task */
2152 while (push_rt_task(rq))
2153 ;
2154 }
2155
2156 #ifdef HAVE_RT_PUSH_IPI
2157
2158 /*
2159 * When a high priority task schedules out from a CPU and a lower priority
2160 * task is scheduled in, a check is made to see if there are any RT tasks
2161 * on other CPUs that are waiting to run because a higher priority RT task
2162 * is currently running on its CPU. In this case, the CPU with multiple RT
2163 * tasks queued on it (overloaded) needs to be notified that a CPU has opened
2164 * up that may be able to run one of its non-running queued RT tasks.
2165 *
2166 * All CPUs with overloaded RT tasks need to be notified as there is currently
2167 * no way to know which of these CPUs have the highest priority task waiting
2168 * to run. Instead of trying to take a spinlock on each of these CPUs,
2169 * which has been shown to cause large latency when done on machines with
2170 * many CPUs, an IPI is sent to the CPUs to have them push off the
2171 * overloaded RT tasks waiting to run.
2172 *
2173 * Just sending an IPI to each of the CPUs is also an issue, as on machines
2174 * with a large CPU count this can cause an IPI storm on a CPU, especially
2175 * if it's the only CPU with multiple RT tasks queued, and a large number
2176 * of CPUs are scheduling a lower priority task at the same time.
2177 *
2178 * Each root domain has its own irq work function that can iterate over
2179 * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
2180 * tasks must be checked whenever one or many CPUs are lowering
2181 * their priority, there's a single irq work iterator that will try to
2182 * push off RT tasks that are waiting to run.
2183 *
2184 * When a CPU schedules a lower priority task, it will kick off the
2185 * irq work iterator that will jump to each CPU with overloaded RT tasks.
2186 * As it only takes the first CPU that schedules a lower priority task
2187 * to start the process, the rto_start variable is incremented and if
2188 * the atomic result is one, then that CPU will try to take the rto_lock.
2189 * This prevents high contention on the lock as the process handles all
2190 * CPUs scheduling lower priority tasks.
2191 *
2192 * All CPUs that are scheduling a lower priority task will increment the
2193 * rt_loop_next variable. This will make sure that the irq work iterator
2194 * checks all RT overloaded CPUs whenever a CPU schedules a new lower
2195 * priority task, even if the iterator is in the middle of a scan. Incrementing
2196 * the rt_loop_next will cause the iterator to perform another scan.
2197 *
2198 */
2199 static int rto_next_cpu(struct root_domain *rd)
2200 {
2201 int next;
2202 int cpu;
2203
2204 /*
2205 * When starting the IPI RT pushing, the rto_cpu is set to -1,
2206 * rto_next_cpu() will simply return the first CPU found in
2207 * the rto_mask.
2208 *
2209 * If rto_next_cpu() is called with rto_cpu set to a valid CPU, it
2210 * will return the next CPU found in the rto_mask.
2211 *
2212 * If there are no more CPUs left in the rto_mask, then a check is made
2213 * against rto_loop and rto_loop_next. rto_loop is only updated with
2214 * the rto_lock held, but any CPU may increment the rto_loop_next
2215 * without any locking.
2216 */
2217 for (;;) {
2218
2219 /* When rto_cpu is -1 this acts like cpumask_first() */
2220 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
2221
2222 rd->rto_cpu = cpu;
2223
2224 if (cpu < nr_cpu_ids)
2225 return cpu;
2226
2227 rd->rto_cpu = -1;
2228
2229 /*
2230 * ACQUIRE ensures we see the @rto_mask changes
2231 * made prior to the @next value observed.
2232 *
2233 * Matches WMB in rt_set_overload().
2234 */
2235 next = atomic_read_acquire(&rd->rto_loop_next);
2236
2237 if (rd->rto_loop == next)
2238 break;
2239
2240 rd->rto_loop = next;
2241 }
2242
2243 return -1;
2244 }
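/*
 * Illustrative walk-through of the loop above (no additional semantics):
 * with rto_cpu == -1 and rto_mask == {2, 5}, the first call returns 2, the
 * next returns 5, and the third finds no further CPU. At that point
 * rto_loop is compared against rto_loop_next; if another CPU lowered its
 * priority in the meantime (and so bumped rto_loop_next), the iterator
 * records the new value and starts another pass over rto_mask, otherwise
 * it returns -1 and the IPI chain stops.
 */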
2245
2246 static inline bool rto_start_trylock(atomic_t *v)
2247 {
2248 return !atomic_cmpxchg_acquire(v, 0, 1);
2249 }
2250
2251 static inline void rto_start_unlock(atomic_t *v)
2252 {
2253 atomic_set_release(v, 0);
2254 }
2255
2256 static void tell_cpu_to_push(struct rq *rq)
2257 {
2258 int cpu = -1;
2259
2260 /* Keep the loop going if the IPI is currently active */
2261 atomic_inc(&rq->rd->rto_loop_next);
2262
2263 /* Only one CPU can initiate a loop at a time */
2264 if (!rto_start_trylock(&rq->rd->rto_loop_start))
2265 return;
2266
2267 raw_spin_lock(&rq->rd->rto_lock);
2268
2269 /*
2270 * The rto_cpu is updated under the lock, if it has a valid CPU
2271 * then the IPI is still running and will continue due to the
2272 * update to loop_next, and nothing needs to be done here.
2273 * Otherwise it is finishing up and an ipi needs to be sent.
2274 */
2275 if (rq->rd->rto_cpu < 0)
2276 cpu = rto_next_cpu(rq->rd);
2277
2278 raw_spin_unlock(&rq->rd->rto_lock);
2279
2280 rto_start_unlock(&rq->rd->rto_loop_start);
2281
2282 if (cpu >= 0) {
2283 /* Make sure the rd does not get freed while pushing */
2284 sched_get_rd(rq->rd);
2285 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2286 }
2287 }
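/*
 * Putting the pieces together: tell_cpu_to_push() only queues rto_push_work
 * on the first CPU returned by rto_next_cpu() when no iteration is already
 * in flight (rto_cpu < 0). The irq work handler, rto_push_irq_work_func()
 * below, pushes tasks on that CPU and then re-queues the same irq work on
 * the next rto_mask CPU, so at most one push IPI is in flight per root
 * domain at any time.
 */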
2288
2289 /* Called from hardirq context */
2290 void rto_push_irq_work_func(struct irq_work *work)
2291 {
2292 struct root_domain *rd =
2293 container_of(work, struct root_domain, rto_push_work);
2294 struct rq *rq;
2295 int cpu;
2296
2297 rq = this_rq();
2298
2299 /*
2300 * We do not need to grab the lock to check for has_pushable_tasks.
2301 * When it gets updated, a check is made if a push is possible.
2302 */
2303 if (has_pushable_tasks(rq)) {
2304 raw_spin_lock(&rq->lock);
2305 push_rt_tasks(rq);
2306 raw_spin_unlock(&rq->lock);
2307 }
2308
2309 raw_spin_lock(&rd->rto_lock);
2310
2311 /* Pass the IPI to the next rt overloaded queue */
2312 cpu = rto_next_cpu(rd);
2313
2314 raw_spin_unlock(&rd->rto_lock);
2315
2316 if (cpu < 0) {
2317 sched_put_rd(rd);
2318 return;
2319 }
2320
2321 /* Try the next RT overloaded CPU */
2322 irq_work_queue_on(&rd->rto_push_work, cpu);
2323 }
2324 #endif /* HAVE_RT_PUSH_IPI */
2325
2326 static void pull_rt_task(struct rq *this_rq)
2327 {
2328 int this_cpu = this_rq->cpu, cpu;
2329 bool resched = false;
2330 struct task_struct *p;
2331 struct rq *src_rq;
2332 int rt_overload_count = rt_overloaded(this_rq);
2333
2334 if (likely(!rt_overload_count))
2335 return;
2336
2337 /*
2338 * Match the barrier from rt_set_overload(); this guarantees that if we
2339 * see overloaded we must also see the rto_mask bit.
2340 */
2341 smp_rmb();
2342
2343 /* If we are the only overloaded CPU do nothing */
2344 if (rt_overload_count == 1 &&
2345 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2346 return;
2347
2348 #ifdef HAVE_RT_PUSH_IPI
2349 if (sched_feat(RT_PUSH_IPI)) {
2350 tell_cpu_to_push(this_rq);
2351 return;
2352 }
2353 #endif
2354
2355 for_each_cpu(cpu, this_rq->rd->rto_mask) {
2356 if (this_cpu == cpu)
2357 continue;
2358
2359 src_rq = cpu_rq(cpu);
2360
2361 /*
2362 * Don't bother taking the src_rq->lock if the next highest
2363 * task is known to be lower-priority than our current task.
2364 * This may look racy, but if this value is about to go
2365 * logically higher, the src_rq will push this task away.
2366 * And if it's going logically lower, we do not care
2367 */
2368 if (src_rq->rt.highest_prio.next >=
2369 this_rq->rt.highest_prio.curr)
2370 continue;
2371
2372 /*
2373 * We can potentially drop this_rq's lock in
2374 * double_lock_balance, and another CPU could
2375 * alter this_rq
2376 */
2377 double_lock_balance(this_rq, src_rq);
2378
2379 /*
2380 * We can pull only a task that is pushable
2381 * on its rq, and no others.
2382 */
2383 p = pick_highest_pushable_task(src_rq, this_cpu);
2384
2385 /*
2386 * Do we have an RT task that preempts
2387 * the to-be-scheduled task?
2388 */
2389 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2390 WARN_ON(p == src_rq->curr);
2391 WARN_ON(!task_on_rq_queued(p));
2392
2393 /*
2394 * There's a chance that p is higher in priority
2395 * than what's currently running on its CPU.
2396 * This is just that p is waking up and hasn't
2397 * had a chance to schedule. We only pull
2398 * p if it is lower in priority than the
2399 * current task on the run queue
2400 */
2401 if (p->prio < src_rq->curr->prio)
2402 goto skip;
2403
2404 resched = true;
2405
2406 deactivate_task(src_rq, p, 0);
2407 set_task_cpu(p, this_cpu);
2408 activate_task(this_rq, p, 0);
2409 /*
2410 * We continue with the search, just in
2411 * case there's an even higher prio task
2412 * in another runqueue. (low likelihood
2413 * but possible)
2414 */
2415 }
2416 skip:
2417 double_unlock_balance(this_rq, src_rq);
2418 }
2419
2420 if (resched)
2421 resched_curr(this_rq);
2422 }
2423
2424 /*
2425 * If we are not running and we are not going to reschedule soon, we should
2426 * try to push tasks away now
2427 */
2428 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2429 {
2430 bool need_to_push = !task_running(rq, p) &&
2431 !test_tsk_need_resched(rq->curr) &&
2432 p->nr_cpus_allowed > 1 &&
2433 (dl_task(rq->curr) || rt_task(rq->curr)) &&
2434 (rq->curr->nr_cpus_allowed < 2 ||
2435 rq->curr->prio <= p->prio);
2436
2437 if (need_to_push)
2438 push_rt_tasks(rq);
2439 }
2440
2441 /* Assumes rq->lock is held */
2442 static void rq_online_rt(struct rq *rq)
2443 {
2444 if (rq->rt.overloaded)
2445 rt_set_overload(rq);
2446
2447 __enable_runtime(rq);
2448
2449 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2450 }
2451
2452 /* Assumes rq->lock is held */
2453 static void rq_offline_rt(struct rq *rq)
2454 {
2455 if (rq->rt.overloaded)
2456 rt_clear_overload(rq);
2457
2458 __disable_runtime(rq);
2459
2460 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2461 }
2462
2463 /*
2464 * When switching from the RT queue, we bring ourselves to a position
2465 * where we might want to pull RT tasks from other runqueues.
2466 */
2467 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2468 {
2469 /*
2470 * If there are other RT tasks then we will reschedule
2471 * and the scheduling of the other RT tasks will handle
2472 * the balancing. But if we are the last RT task
2473 * we may need to handle the pulling of RT tasks
2474 * now.
2475 */
2476 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running ||
2477 cpu_isolated(cpu_of(rq)))
2478 return;
2479
2480 rt_queue_pull_task(rq);
2481 }
2482
2483 void __init init_sched_rt_class(void)
2484 {
2485 unsigned int i;
2486
2487 for_each_possible_cpu(i) {
2488 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2489 GFP_KERNEL, cpu_to_node(i));
2490 }
2491 }
2492 #endif /* CONFIG_SMP */
2493
2494 /*
2495 * When switching a task to RT, we may overload the runqueue
2496 * with RT tasks. In this case we try to push them off to
2497 * other runqueues.
2498 */
2499 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2500 {
2501 /*
2502 * If we are running, update the avg_rt tracking, as the running time
2503 * will from now on be accounted into the latter.
2504 */
2505 if (task_current(rq, p)) {
2506 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2507 return;
2508 }
2509
2510 /*
2511 * If we are not running we may need to preempt the current
2512 * running task. If that current running task is also an RT task
2513 * then see if we can move to another run queue.
2514 */
2515 if (task_on_rq_queued(p)) {
2516 #ifdef CONFIG_SMP
2517 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2518 rt_queue_push_tasks(rq);
2519 #endif /* CONFIG_SMP */
2520 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2521 resched_curr(rq);
2522 }
2523 }
2524
2525 /*
2526 * Priority of the task has changed. This may cause
2527 * us to initiate a push or pull.
2528 */
2529 static void
2530 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2531 {
2532 if (!task_on_rq_queued(p))
2533 return;
2534
2535 if (rq->curr == p) {
2536 #ifdef CONFIG_SMP
2537 /*
2538 * If our priority decreases while running, we
2539 * may need to pull tasks to this runqueue.
2540 */
2541 if (oldprio < p->prio)
2542 rt_queue_pull_task(rq);
2543
2544 /*
2545 * If there's a higher priority task waiting to run
2546 * then reschedule.
2547 */
2548 if (p->prio > rq->rt.highest_prio.curr)
2549 resched_curr(rq);
2550 #else
2551 /* For UP simply resched on drop of prio */
2552 if (oldprio < p->prio)
2553 resched_curr(rq);
2554 #endif /* CONFIG_SMP */
2555 } else {
2556 /*
2557 * This task is not running, but if it is
2558 * higher in priority than the current running task
2559 * then reschedule.
2560 */
2561 if (p->prio < rq->curr->prio)
2562 resched_curr(rq);
2563 }
2564 }
2565
2566 #ifdef CONFIG_POSIX_TIMERS
2567 static void watchdog(struct rq *rq, struct task_struct *p)
2568 {
2569 unsigned long soft, hard;
2570
2571 /* max may change after cur was read, this will be fixed next tick */
2572 soft = task_rlimit(p, RLIMIT_RTTIME);
2573 hard = task_rlimit_max(p, RLIMIT_RTTIME);
2574
2575 if (soft != RLIM_INFINITY) {
2576 unsigned long next;
2577
2578 if (p->rt.watchdog_stamp != jiffies) {
2579 p->rt.timeout++;
2580 p->rt.watchdog_stamp = jiffies;
2581 }
2582
2583 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2584 if (p->rt.timeout > next) {
2585 posix_cputimers_rt_watchdog(&p->posix_cputimers,
2586 p->se.sum_exec_runtime);
2587 }
2588 }
2589 }
2590 #else
2591 static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2592 #endif
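/*
 * The watchdog above enforces RLIMIT_RTTIME, which a task configures from
 * userspace. A minimal sketch (userspace C, not part of this file; the
 * numbers are made up for illustration):
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl = {
 *		.rlim_cur = 500000,	// soft limit: 500 ms of RT CPU time
 *		.rlim_max = 1000000,	// hard limit: 1 s
 *	};
 *	setrlimit(RLIMIT_RTTIME, &rl);
 *
 * The limits are in microseconds of CPU time consumed without blocking;
 * per the RLIMIT_RTTIME semantics, crossing the soft limit makes the
 * posix-cputimers code deliver SIGXCPU, and the hard limit escalates to
 * SIGKILL.
 */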
2593
2594 /*
2595 * scheduler tick hitting a task of our scheduling class.
2596 *
2597 * NOTE: This function can be called remotely by the tick offload that
2598 * goes along full dynticks. Therefore no local assumption can be made
2599 * and everything must be accessed through the @rq and @curr passed in
2600 * parameters.
2601 */
2602 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2603 {
2604 struct sched_rt_entity *rt_se = &p->rt;
2605
2606 update_curr_rt(rq);
2607 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2608
2609 watchdog(rq, p);
2610
2611 /*
2612 * RR tasks need a special form of timeslice management.
2613 * FIFO tasks have no timeslices.
2614 */
2615 if (p->policy != SCHED_RR)
2616 return;
2617
2618 if (--p->rt.time_slice)
2619 return;
2620
2621 p->rt.time_slice = sched_rr_timeslice;
2622
2623 /*
2624 * Requeue to the end of queue if we (and all of our ancestors) are not
2625 * the only element on the queue
2626 */
2627 for_each_sched_rt_entity(rt_se) {
2628 if (rt_se->run_list.prev != rt_se->run_list.next) {
2629 requeue_task_rt(rq, p, 0);
2630 resched_curr(rq);
2631 return;
2632 }
2633 }
2634 }
2635
2636 #ifdef CONFIG_SCHED_RT_ACTIVE_LB
2637 static int rt_active_load_balance_cpu_stop(void *data)
2638 {
2639 struct rq *busiest_rq = data;
2640 struct task_struct *next_task = busiest_rq->rt_push_task;
2641 struct rq *lowest_rq = NULL;
2642 unsigned long flags;
2643
2644 raw_spin_lock_irqsave(&busiest_rq->lock, flags);
2645 busiest_rq->rt_active_balance = 0;
2646
2647 if (!task_on_rq_queued(next_task) ||
2648 task_cpu(next_task) != cpu_of(busiest_rq))
2649 goto out;
2650
2651 /* find_lock_lowest_rq locks the rq if found */
2652 lowest_rq = find_lock_lowest_rq(next_task, busiest_rq);
2653 if (!lowest_rq)
2654 goto out;
2655
2656 if (capacity_orig_of(cpu_of(lowest_rq)) <= capacity_orig_of(task_cpu(next_task)))
2657 goto unlock;
2658
2659 deactivate_task(busiest_rq, next_task, 0);
2660 set_task_cpu(next_task, lowest_rq->cpu);
2661 activate_task(lowest_rq, next_task, 0);
2662
2663 resched_curr(lowest_rq);
2664 unlock:
2665 double_unlock_balance(busiest_rq, lowest_rq);
2666 out:
2667 put_task_struct(next_task);
2668 raw_spin_unlock_irqrestore(&busiest_rq->lock, flags);
2669
2670 return 0;
2671 }
2672
2673 static void check_for_migration_rt(struct rq *rq, struct task_struct *p)
2674 {
2675 bool need_active_lb = false;
2676 bool misfit_task = false;
2677 int cpu = task_cpu(p);
2678 unsigned long cpu_orig_cap;
2679 #ifdef CONFIG_SCHED_RTG
2680 struct cpumask *rtg_target = NULL;
2681 #endif
2682
2683 if (!sysctl_sched_enable_rt_active_lb)
2684 return;
2685
2686 if (p->nr_cpus_allowed == 1)
2687 return;
2688
2689 cpu_orig_cap = capacity_orig_of(cpu);
2690 /* cpu has max capacity, no need to balance */
2691 if (cpu_orig_cap == rq->rd->max_cpu_capacity)
2692 return;
2693
2694 #ifdef CONFIG_SCHED_RTG
2695 rtg_target = find_rtg_target(p);
2696 if (rtg_target)
2697 misfit_task = capacity_orig_of(cpumask_first(rtg_target)) >
2698 cpu_orig_cap;
2699 else
2700 misfit_task = !rt_task_fits_capacity(p, cpu);
2701 #else
2702 misfit_task = !rt_task_fits_capacity(p, cpu);
2703 #endif
2704
2705 if (misfit_task) {
2706 raw_spin_lock(&rq->lock);
2707 if (!rq->active_balance && !rq->rt_active_balance) {
2708 rq->rt_active_balance = 1;
2709 rq->rt_push_task = p;
2710 get_task_struct(p);
2711 need_active_lb = true;
2712 }
2713 raw_spin_unlock(&rq->lock);
2714
2715 if (need_active_lb)
2716 stop_one_cpu_nowait(task_cpu(p),
2717 rt_active_load_balance_cpu_stop,
2718 rq, &rq->rt_active_balance_work);
2719 }
2720 }
2721 #endif
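/*
 * Flow of the RT active load balance above, for reference: the tick path
 * calls check_for_migration_rt(); if the running RT task no longer fits
 * its CPU (misfit, or an RTG target cluster with more capacity exists),
 * the CPU stopper is kicked and rt_active_load_balance_cpu_stop() reuses
 * the regular push path (find_lock_lowest_rq()) to move the task, but
 * only to a CPU with strictly larger original capacity.
 */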
2722
2723 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2724 {
2725 /*
2726 * Time slice is 0 for SCHED_FIFO tasks
2727 */
2728 if (task->policy == SCHED_RR)
2729 return sched_rr_timeslice;
2730 else
2731 return 0;
2732 }
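/*
 * The value returned above is what userspace observes through
 * sched_rr_get_interval(). A minimal sketch (userspace C, illustrative
 * only):
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec ts;
 *
 *		// 0 == calling thread; RR tasks get the timeslice,
 *		// FIFO tasks get 0.
 *		if (sched_rr_get_interval(0, &ts) == 0)
 *			printf("%ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
 *		return 0;
 *	}
 */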
2733
2734 const struct sched_class rt_sched_class
2735 __section("__rt_sched_class") = {
2736 .enqueue_task = enqueue_task_rt,
2737 .dequeue_task = dequeue_task_rt,
2738 .yield_task = yield_task_rt,
2739
2740 .check_preempt_curr = check_preempt_curr_rt,
2741
2742 .pick_next_task = pick_next_task_rt,
2743 .put_prev_task = put_prev_task_rt,
2744 .set_next_task = set_next_task_rt,
2745
2746 #ifdef CONFIG_SMP
2747 .balance = balance_rt,
2748 .select_task_rq = select_task_rq_rt,
2749 .set_cpus_allowed = set_cpus_allowed_common,
2750 .rq_online = rq_online_rt,
2751 .rq_offline = rq_offline_rt,
2752 .task_woken = task_woken_rt,
2753 .switched_from = switched_from_rt,
2754 #endif
2755
2756 .task_tick = task_tick_rt,
2757
2758 .get_rr_interval = get_rr_interval_rt,
2759
2760 .prio_changed = prio_changed_rt,
2761 .switched_to = switched_to_rt,
2762
2763 .update_curr = update_curr_rt,
2764
2765 #ifdef CONFIG_UCLAMP_TASK
2766 .uclamp_enabled = 1,
2767 #endif
2768 #ifdef CONFIG_SCHED_WALT
2769 .fixup_walt_sched_stats = fixup_walt_sched_stats_common,
2770 #endif
2771 #ifdef CONFIG_SCHED_RT_ACTIVE_LB
2772 .check_for_migration = check_for_migration_rt,
2773 #endif
2774 };
2775
2776 #ifdef CONFIG_RT_GROUP_SCHED
2777 /*
2778 * Ensure that the real time constraints are schedulable.
2779 */
2780 static DEFINE_MUTEX(rt_constraints_mutex);
2781
2782 static inline int tg_has_rt_tasks(struct task_group *tg)
2783 {
2784 struct task_struct *task;
2785 struct css_task_iter it;
2786 int ret = 0;
2787
2788 /*
2789 * Autogroups do not have RT tasks; see autogroup_create().
2790 */
2791 if (task_group_is_autogroup(tg))
2792 return 0;
2793
2794 css_task_iter_start(&tg->css, 0, &it);
2795 while (!ret && (task = css_task_iter_next(&it)))
2796 ret |= rt_task(task);
2797 css_task_iter_end(&it);
2798
2799 return ret;
2800 }
2801
2802 struct rt_schedulable_data {
2803 struct task_group *tg;
2804 u64 rt_period;
2805 u64 rt_runtime;
2806 };
2807
2808 static int tg_rt_schedulable(struct task_group *tg, void *data)
2809 {
2810 struct rt_schedulable_data *d = data;
2811 struct task_group *child;
2812 unsigned long total, sum = 0;
2813 u64 period, runtime;
2814
2815 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2816 runtime = tg->rt_bandwidth.rt_runtime;
2817
2818 if (tg == d->tg) {
2819 period = d->rt_period;
2820 runtime = d->rt_runtime;
2821 }
2822
2823 /*
2824 * Cannot have more runtime than the period.
2825 */
2826 if (runtime > period && runtime != RUNTIME_INF)
2827 return -EINVAL;
2828
2829 /*
2830 * Ensure we don't starve existing RT tasks if runtime turns zero.
2831 */
2832 if (rt_bandwidth_enabled() && !runtime &&
2833 tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
2834 return -EBUSY;
2835
2836 total = to_ratio(period, runtime);
2837
2838 /*
2839 * Nobody can have more than the global setting allows.
2840 */
2841 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
2842 return -EINVAL;
2843
2844 /*
2845 * The sum of our children's runtime should not exceed our own.
2846 */
2847 list_for_each_entry_rcu(child, &tg->children, siblings) {
2848 period = ktime_to_ns(child->rt_bandwidth.rt_period);
2849 runtime = child->rt_bandwidth.rt_runtime;
2850
2851 if (child == d->tg) {
2852 period = d->rt_period;
2853 runtime = d->rt_runtime;
2854 }
2855
2856 sum += to_ratio(period, runtime);
2857 }
2858
2859 if (sum > total)
2860 return -EINVAL;
2861
2862 return 0;
2863 }
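/*
 * A worked example of the check above (numbers are illustrative): with the
 * global defaults of a 1s period and 0.95s runtime, to_ratio() caps the
 * whole hierarchy at 95% of each CPU. A parent group configured with
 * 500000us/1000000us (50%) may host two children asking for
 * 200000us/1000000us (20%) each, since 20% + 20% <= 50%; a third such
 * child would push the sum to 60% > 50% and tg_rt_schedulable() would
 * return -EINVAL.
 */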
2864
2865 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2866 {
2867 int ret;
2868
2869 struct rt_schedulable_data data = {
2870 .tg = tg,
2871 .rt_period = period,
2872 .rt_runtime = runtime,
2873 };
2874
2875 rcu_read_lock();
2876 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
2877 rcu_read_unlock();
2878
2879 return ret;
2880 }
2881
2882 static int tg_set_rt_bandwidth(struct task_group *tg,
2883 u64 rt_period, u64 rt_runtime)
2884 {
2885 int i, err = 0;
2886
2887 /*
2888 * Disallowing the root group RT runtime is BAD, as it would prevent the
2889 * kernel from creating (and/or operating) RT threads.
2890 */
2891 if (tg == &root_task_group && rt_runtime == 0)
2892 return -EINVAL;
2893
2894 /* No period doesn't make any sense. */
2895 if (rt_period == 0)
2896 return -EINVAL;
2897
2898 /*
2899 * Bound quota to defend quota against overflow during bandwidth shift.
2900 */
2901 if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
2902 return -EINVAL;
2903
2904 mutex_lock(&rt_constraints_mutex);
2905 err = __rt_schedulable(tg, rt_period, rt_runtime);
2906 if (err)
2907 goto unlock;
2908
2909 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2910 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
2911 tg->rt_bandwidth.rt_runtime = rt_runtime;
2912
2913 for_each_possible_cpu(i) {
2914 struct rt_rq *rt_rq = tg->rt_rq[i];
2915
2916 raw_spin_lock(&rt_rq->rt_runtime_lock);
2917 rt_rq->rt_runtime = rt_runtime;
2918 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2919 }
2920 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2921 unlock:
2922 mutex_unlock(&rt_constraints_mutex);
2923
2924 return err;
2925 }
2926
2927 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
2928 {
2929 u64 rt_runtime, rt_period;
2930
2931 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2932 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
2933 if (rt_runtime_us < 0)
2934 rt_runtime = RUNTIME_INF;
2935 else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
2936 return -EINVAL;
2937
2938 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2939 }
2940
2941 long sched_group_rt_runtime(struct task_group *tg)
2942 {
2943 u64 rt_runtime_us;
2944
2945 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
2946 return -1;
2947
2948 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
2949 do_div(rt_runtime_us, NSEC_PER_USEC);
2950 return rt_runtime_us;
2951 }
2952
2953 int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
2954 {
2955 u64 rt_runtime, rt_period;
2956
2957 if (rt_period_us > U64_MAX / NSEC_PER_USEC)
2958 return -EINVAL;
2959
2960 rt_period = rt_period_us * NSEC_PER_USEC;
2961 rt_runtime = tg->rt_bandwidth.rt_runtime;
2962
2963 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2964 }
2965
2966 long sched_group_rt_period(struct task_group *tg)
2967 {
2968 u64 rt_period_us;
2969
2970 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
2971 do_div(rt_period_us, NSEC_PER_USEC);
2972 return rt_period_us;
2973 }
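/*
 * These setters back the cgroup (v1) cpu controller files. A minimal usage
 * sketch, assuming a mounted cpu controller and a group named "rtgrp":
 *
 *	# allow tasks in rtgrp to use 300ms of RT time per 1s period
 *	echo 1000000 > /sys/fs/cgroup/cpu/rtgrp/cpu.rt_period_us
 *	echo 300000  > /sys/fs/cgroup/cpu/rtgrp/cpu.rt_runtime_us
 *
 * Writing -1 to cpu.rt_runtime_us maps to RUNTIME_INF, and attaching an RT
 * task to a group with zero runtime is rejected by sched_rt_can_attach()
 * below.
 */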
2974
2975 static int sched_rt_global_constraints(void)
2976 {
2977 int ret = 0;
2978
2979 mutex_lock(&rt_constraints_mutex);
2980 ret = __rt_schedulable(NULL, 0, 0);
2981 mutex_unlock(&rt_constraints_mutex);
2982
2983 return ret;
2984 }
2985
2986 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
2987 {
2988 /* Don't accept realtime tasks when there is no way for them to run */
2989 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
2990 return 0;
2991
2992 return 1;
2993 }
2994
2995 #else /* !CONFIG_RT_GROUP_SCHED */
2996 static int sched_rt_global_constraints(void)
2997 {
2998 unsigned long flags;
2999 int i;
3000
3001 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
3002 for_each_possible_cpu(i) {
3003 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
3004
3005 raw_spin_lock(&rt_rq->rt_runtime_lock);
3006 rt_rq->rt_runtime = global_rt_runtime();
3007 raw_spin_unlock(&rt_rq->rt_runtime_lock);
3008 }
3009 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
3010
3011 return 0;
3012 }
3013 #endif /* CONFIG_RT_GROUP_SCHED */
3014
3015 static int sched_rt_global_validate(void)
3016 {
3017 if (sysctl_sched_rt_period <= 0)
3018 return -EINVAL;
3019
3020 if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
3021 ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
3022 ((u64)sysctl_sched_rt_runtime *
3023 NSEC_PER_USEC > max_rt_runtime)))
3024 return -EINVAL;
3025
3026 return 0;
3027 }
3028
3029 static void sched_rt_do_global(void)
3030 {
3031 raw_spin_lock(&def_rt_bandwidth.rt_runtime_lock);
3032 def_rt_bandwidth.rt_runtime = global_rt_runtime();
3033 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
3034 raw_spin_unlock(&def_rt_bandwidth.rt_runtime_lock);
3035 }
3036
3037 int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
3038 size_t *lenp, loff_t *ppos)
3039 {
3040 int old_period, old_runtime;
3041 static DEFINE_MUTEX(mutex);
3042 int ret;
3043
3044 mutex_lock(&mutex);
3045 old_period = sysctl_sched_rt_period;
3046 old_runtime = sysctl_sched_rt_runtime;
3047
3048 ret = proc_dointvec(table, write, buffer, lenp, ppos);
3049
3050 if (!ret && write) {
3051 ret = sched_rt_global_validate();
3052 if (ret)
3053 goto undo;
3054
3055 ret = sched_dl_global_validate();
3056 if (ret)
3057 goto undo;
3058
3059 ret = sched_rt_global_constraints();
3060 if (ret)
3061 goto undo;
3062
3063 sched_rt_do_global();
3064 sched_dl_do_global();
3065 }
3066 if (0) {
3067 undo:
3068 sysctl_sched_rt_period = old_period;
3069 sysctl_sched_rt_runtime = old_runtime;
3070 }
3071 mutex_unlock(&mutex);
3072
3073 return ret;
3074 }
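/*
 * The handler above backs the global RT throttling knobs. A typical usage
 * sketch (the first two values are the upstream defaults):
 *
 *	# RT tasks may use 950ms out of every 1s
 *	echo 1000000 > /proc/sys/kernel/sched_rt_period_us
 *	echo 950000  > /proc/sys/kernel/sched_rt_runtime_us
 *
 *	# disable RT throttling entirely
 *	echo -1 > /proc/sys/kernel/sched_rt_runtime_us
 *
 * sched_rt_global_validate() rejects a runtime larger than the period
 * (unless it is RUNTIME_INF) and values that would overflow the bandwidth
 * shift, so such writes fail with -EINVAL and the old values are restored
 * under the "undo" label.
 */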
3075
3076 int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
3077 size_t *lenp, loff_t *ppos)
3078 {
3079 int ret;
3080 static DEFINE_MUTEX(mutex);
3081
3082 mutex_lock(&mutex);
3083 ret = proc_dointvec(table, write, buffer, lenp, ppos);
3084 /*
3085 * Make sure that internally we keep jiffies.
3086 * Also, writing zero resets the timeslice to default:
3087 */
3088 if (!ret && write) {
3089 sched_rr_timeslice =
3090 sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
3091 msecs_to_jiffies(sysctl_sched_rr_timeslice);
3092 }
3093 mutex_unlock(&mutex);
3094
3095 return ret;
3096 }
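/*
 * Companion knob for SCHED_RR, handled above. A short usage sketch:
 *
 *	# set the RR timeslice to 50ms
 *	echo 50 > /proc/sys/kernel/sched_rr_timeslice_ms
 *
 *	# writing 0 (or a negative value) restores the RR_TIMESLICE default
 *	echo 0 > /proc/sys/kernel/sched_rr_timeslice_ms
 *
 * Internally the value is converted to jiffies and stored in
 * sched_rr_timeslice, which task_tick_rt() uses to refill
 * p->rt.time_slice.
 */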
3097
3098 #ifdef CONFIG_SCHED_DEBUG
3099 void print_rt_stats(struct seq_file *m, int cpu)
3100 {
3101 rt_rq_iter_t iter;
3102 struct rt_rq *rt_rq;
3103
3104 rcu_read_lock();
3105 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
3106 print_rt_rq(m, cpu, rt_rq);
3107 rcu_read_unlock();
3108 }
3109 #endif /* CONFIG_SCHED_DEBUG */
3110