1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
4 * policies)
5 */
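/*
 * Illustrative note (not part of the original source): tasks enter this
 * class from user space via sched_setscheduler()/sched_setattr(), e.g. a
 * minimal sketch:
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *	sched_setscheduler(0, SCHED_FIFO, &sp);	/* 0 == calling thread */
 *
 * after which the task's embedded sched_rt_entity (p->rt) is managed by
 * the enqueue/dequeue/pick callbacks defined in this file.
 */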
6 #include "sched.h"
7
8 #include "pelt.h"
9 #include "walt.h"
10
11 int sched_rr_timeslice = RR_TIMESLICE;
12 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
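/*
 * Worked example (assuming the usual RR_TIMESLICE definition of
 * 100 * HZ / 1000 jiffies): with HZ=250, RR_TIMESLICE is 25 jiffies and
 * sysctl_sched_rr_timeslice initializes to (1000 / 250) * 25 = 100 ms,
 * the default SCHED_RR slice exposed via
 * /proc/sys/kernel/sched_rr_timeslice_ms.
 */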
13 /* More than 4 hours if BW_SHIFT equals 20. */
14 static const u64 max_rt_runtime = MAX_BW;
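/*
 * Arithmetic behind the comment above, assuming MAX_BW is defined as
 * ((1ULL << (64 - BW_SHIFT)) - 1) nanoseconds: with BW_SHIFT == 20 that is
 * 2^44 - 1 ns ~= 17592 seconds ~= 4.9 hours, hence "more than 4 hours".
 */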
15
16 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
17
18 struct rt_bandwidth def_rt_bandwidth;
19
20 #ifdef CONFIG_SCHED_RT_CAS
21 unsigned int sysctl_sched_enable_rt_cas = 1;
22 #endif
23
24 #ifdef CONFIG_SCHED_RT_ACTIVE_LB
25 unsigned int sysctl_sched_enable_rt_active_lb = 1;
26 #endif
27
28 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
29 {
30 struct rt_bandwidth *rt_b =
31 container_of(timer, struct rt_bandwidth, rt_period_timer);
32 int idle = 0;
33 int overrun;
34
35 raw_spin_lock(&rt_b->rt_runtime_lock);
36 for (;;) {
37 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
38 if (!overrun)
39 break;
40
41 raw_spin_unlock(&rt_b->rt_runtime_lock);
42 idle = do_sched_rt_period_timer(rt_b, overrun);
43 raw_spin_lock(&rt_b->rt_runtime_lock);
44 }
45 if (idle)
46 rt_b->rt_period_active = 0;
47 raw_spin_unlock(&rt_b->rt_runtime_lock);
48
49 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
50 }
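/*
 * Note on the loop above: hrtimer_forward_now() advances the timer expiry
 * by whole rt_period intervals and returns how many intervals were skipped
 * (0 when the expiry is still in the future). Each non-zero return is fed
 * to do_sched_rt_period_timer() as 'overrun', so missed periods replenish
 * proportionally rather than one period at a time.
 */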
51
52 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
53 {
54 rt_b->rt_period = ns_to_ktime(period);
55 rt_b->rt_runtime = runtime;
56
57 raw_spin_lock_init(&rt_b->rt_runtime_lock);
58
59 hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
60 HRTIMER_MODE_REL_HARD);
61 rt_b->rt_period_timer.function = sched_rt_period_timer;
62 }
63
64 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
65 {
66 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
67 return;
68
69 raw_spin_lock(&rt_b->rt_runtime_lock);
70 if (!rt_b->rt_period_active) {
71 rt_b->rt_period_active = 1;
72 /*
73 * SCHED_DEADLINE updates the bandwidth, as a run away
74 * RT task with a DL task could hog a CPU. But DL does
75 * not reset the period. If a deadline task was running
76 * without an RT task running, it can cause RT tasks to
77 * throttle when they start up. Kick the timer right away
78 * to update the period.
79 */
80 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
81 hrtimer_start_expires(&rt_b->rt_period_timer,
82 HRTIMER_MODE_ABS_PINNED_HARD);
83 }
84 raw_spin_unlock(&rt_b->rt_runtime_lock);
85 }
86
87 void init_rt_rq(struct rt_rq *rt_rq)
88 {
89 struct rt_prio_array *array;
90 int i;
91
92 array = &rt_rq->active;
93 for (i = 0; i < MAX_RT_PRIO; i++) {
94 INIT_LIST_HEAD(array->queue + i);
95 __clear_bit(i, array->bitmap);
96 }
97 /* delimiter for bitsearch: */
98 __set_bit(MAX_RT_PRIO, array->bitmap);
99
100 #if defined CONFIG_SMP
101 rt_rq->highest_prio.curr = MAX_RT_PRIO;
102 rt_rq->highest_prio.next = MAX_RT_PRIO;
103 rt_rq->rt_nr_migratory = 0;
104 rt_rq->overloaded = 0;
105 plist_head_init(&rt_rq->pushable_tasks);
106 #endif /* CONFIG_SMP */
107 /* We start in dequeued state, because no RT tasks are queued */
108 rt_rq->rt_queued = 0;
109
110 rt_rq->rt_time = 0;
111 rt_rq->rt_throttled = 0;
112 rt_rq->rt_runtime = 0;
113 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
114 }
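/*
 * The permanently-set bit at index MAX_RT_PRIO above is the bitsearch
 * delimiter mentioned in the comment: sched_find_first_bit() on an
 * otherwise empty array returns MAX_RT_PRIO instead of scanning past the
 * end of the bitmap, which is why pick_next_rt_entity() can BUG_ON() an
 * index >= MAX_RT_PRIO rather than handle a missing bit.
 */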
115
116 #ifdef CONFIG_RT_GROUP_SCHED
117 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
118 {
119 hrtimer_cancel(&rt_b->rt_period_timer);
120 }
121
122 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
123
124 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
125 {
126 #ifdef CONFIG_SCHED_DEBUG
127 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
128 #endif
129 return container_of(rt_se, struct task_struct, rt);
130 }
131
132 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
133 {
134 return rt_rq->rq;
135 }
136
137 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
138 {
139 return rt_se->rt_rq;
140 }
141
142 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
143 {
144 struct rt_rq *rt_rq = rt_se->rt_rq;
145
146 return rt_rq->rq;
147 }
148
149 void free_rt_sched_group(struct task_group *tg)
150 {
151 int i;
152
153 if (tg->rt_se)
154 destroy_rt_bandwidth(&tg->rt_bandwidth);
155
156 for_each_possible_cpu(i) {
157 if (tg->rt_rq)
158 kfree(tg->rt_rq[i]);
159 if (tg->rt_se)
160 kfree(tg->rt_se[i]);
161 }
162
163 kfree(tg->rt_rq);
164 kfree(tg->rt_se);
165 }
166
167 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
168 struct sched_rt_entity *rt_se, int cpu,
169 struct sched_rt_entity *parent)
170 {
171 struct rq *rq = cpu_rq(cpu);
172
173 rt_rq->highest_prio.curr = MAX_RT_PRIO;
174 rt_rq->rt_nr_boosted = 0;
175 rt_rq->rq = rq;
176 rt_rq->tg = tg;
177
178 tg->rt_rq[cpu] = rt_rq;
179 tg->rt_se[cpu] = rt_se;
180
181 if (!rt_se)
182 return;
183
184 if (!parent)
185 rt_se->rt_rq = &rq->rt;
186 else
187 rt_se->rt_rq = parent->my_q;
188
189 rt_se->my_q = rt_rq;
190 rt_se->parent = parent;
191 INIT_LIST_HEAD(&rt_se->run_list);
192 }
193
194 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
195 {
196 struct rt_rq *rt_rq;
197 struct sched_rt_entity *rt_se;
198 int i;
199
200 tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
201 if (!tg->rt_rq)
202 goto err;
203 tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
204 if (!tg->rt_se)
205 goto err;
206
207 init_rt_bandwidth(&tg->rt_bandwidth,
208 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
209
210 for_each_possible_cpu(i) {
211 rt_rq = kzalloc_node(sizeof(struct rt_rq),
212 GFP_KERNEL, cpu_to_node(i));
213 if (!rt_rq)
214 goto err;
215
216 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
217 GFP_KERNEL, cpu_to_node(i));
218 if (!rt_se)
219 goto err_free_rq;
220
221 init_rt_rq(rt_rq);
222 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
223 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
224 }
225
226 return 1;
227
228 err_free_rq:
229 kfree(rt_rq);
230 err:
231 return 0;
232 }
233
234 #else /* CONFIG_RT_GROUP_SCHED */
235
236 #define rt_entity_is_task(rt_se) (1)
237
238 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
239 {
240 return container_of(rt_se, struct task_struct, rt);
241 }
242
243 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
244 {
245 return container_of(rt_rq, struct rq, rt);
246 }
247
248 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
249 {
250 struct task_struct *p = rt_task_of(rt_se);
251
252 return task_rq(p);
253 }
254
255 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
256 {
257 struct rq *rq = rq_of_rt_se(rt_se);
258
259 return &rq->rt;
260 }
261
262 void free_rt_sched_group(struct task_group *tg) { }
263
264 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
265 {
266 return 1;
267 }
268 #endif /* CONFIG_RT_GROUP_SCHED */
269
270 #ifdef CONFIG_SMP
271
272 static void pull_rt_task(struct rq *this_rq);
273
274 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
275 {
276 /*
277 * Try to pull RT tasks here if we lower this rq's prio and cpu is not
278 * isolated
279 */
280 return rq->rt.highest_prio.curr > prev->prio &&
281 !cpu_isolated(cpu_of(rq));
282 }
283
284 static inline int rt_overloaded(struct rq *rq)
285 {
286 return atomic_read(&rq->rd->rto_count);
287 }
288
289 static inline void rt_set_overload(struct rq *rq)
290 {
291 if (!rq->online)
292 return;
293
294 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
295 /*
296 * Make sure the mask is visible before we set
297 * the overload count. That is checked to determine
298 * if we should look at the mask. It would be a shame
299 * if we looked at the mask, but the mask was not
300 * updated yet.
301 *
302 * Matched by the barrier in pull_rt_task().
303 */
304 smp_wmb();
305 atomic_inc(&rq->rd->rto_count);
306 }
307
308 static inline void rt_clear_overload(struct rq *rq)
309 {
310 if (!rq->online)
311 return;
312
313 /* the order here really doesn't matter */
314 atomic_dec(&rq->rd->rto_count);
315 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
316 }
317
318 static void update_rt_migration(struct rt_rq *rt_rq)
319 {
320 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
321 if (!rt_rq->overloaded) {
322 rt_set_overload(rq_of_rt_rq(rt_rq));
323 rt_rq->overloaded = 1;
324 }
325 } else if (rt_rq->overloaded) {
326 rt_clear_overload(rq_of_rt_rq(rt_rq));
327 rt_rq->overloaded = 0;
328 }
329 }
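/*
 * Example of the condition above: a runqueue holding two queued RT tasks
 * where at least one has nr_cpus_allowed > 1 is marked overloaded, so peer
 * CPUs running lower-priority work can pull the surplus task; once only
 * one task remains, or none of them can migrate, the flag is cleared.
 */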
330
331 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
332 {
333 struct task_struct *p;
334
335 if (!rt_entity_is_task(rt_se))
336 return;
337
338 p = rt_task_of(rt_se);
339 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
340
341 rt_rq->rt_nr_total++;
342 if (p->nr_cpus_allowed > 1)
343 rt_rq->rt_nr_migratory++;
344
345 update_rt_migration(rt_rq);
346 }
347
348 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
349 {
350 struct task_struct *p;
351
352 if (!rt_entity_is_task(rt_se))
353 return;
354
355 p = rt_task_of(rt_se);
356 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
357
358 rt_rq->rt_nr_total--;
359 if (p->nr_cpus_allowed > 1)
360 rt_rq->rt_nr_migratory--;
361
362 update_rt_migration(rt_rq);
363 }
364
365 static inline int has_pushable_tasks(struct rq *rq)
366 {
367 return !plist_head_empty(&rq->rt.pushable_tasks);
368 }
369
370 static DEFINE_PER_CPU(struct callback_head, rt_push_head);
371 static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
372
373 static void push_rt_tasks(struct rq *);
374 static void pull_rt_task(struct rq *);
375
376 static inline void rt_queue_push_tasks(struct rq *rq)
377 {
378 if (!has_pushable_tasks(rq))
379 return;
380
381 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
382 }
383
384 static inline void rt_queue_pull_task(struct rq *rq)
385 {
386 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
387 }
388
389 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
390 {
391 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
392 plist_node_init(&p->pushable_tasks, p->prio);
393 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
394
395 /* Update the highest prio pushable task */
396 if (p->prio < rq->rt.highest_prio.next)
397 rq->rt.highest_prio.next = p->prio;
398 }
399
400 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
401 {
402 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
403
404 /* Update the new highest prio pushable task */
405 if (has_pushable_tasks(rq)) {
406 p = plist_first_entry(&rq->rt.pushable_tasks,
407 struct task_struct, pushable_tasks);
408 rq->rt.highest_prio.next = p->prio;
409 } else
410 rq->rt.highest_prio.next = MAX_RT_PRIO;
411 }
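/*
 * pushable_tasks is a plist ordered by ascending p->prio, so the
 * plist_first_entry() above is always the highest-priority (numerically
 * lowest prio) task that may still be pushed; highest_prio.next simply
 * caches that value for the push/pull decisions further down.
 */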
412
413 #else
414
415 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
416 {
417 }
418
419 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
420 {
421 }
422
423 static inline
424 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
425 {
426 }
427
428 static inline
429 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
430 {
431 }
432
433 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
434 {
435 return false;
436 }
437
438 static inline void pull_rt_task(struct rq *this_rq)
439 {
440 }
441
442 static inline void rt_queue_push_tasks(struct rq *rq)
443 {
444 }
445 #endif /* CONFIG_SMP */
446
447 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
448 static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
449
450 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
451 {
452 return rt_se->on_rq;
453 }
454
455 #ifdef CONFIG_UCLAMP_TASK
456 /*
457 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
458 * settings.
459 *
460 * This check is only important for heterogeneous systems where the uclamp_min
461 * value is higher than the capacity of a @cpu. For non-heterogeneous systems this
462 * function will always return true.
463 *
464 * The function will return true if the capacity of the @cpu is >= the
465 * uclamp_min and false otherwise.
466 *
467 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
468 * > uclamp_max.
469 */
470 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
471 {
472 unsigned int min_cap;
473 unsigned int max_cap;
474 unsigned int cpu_cap;
475
476 /* Only heterogeneous systems can benefit from this check */
477 if (!static_branch_unlikely(&sched_asym_cpucapacity))
478 return true;
479
480 min_cap = uclamp_eff_value(p, UCLAMP_MIN);
481 max_cap = uclamp_eff_value(p, UCLAMP_MAX);
482
483 cpu_cap = capacity_orig_of(cpu);
484
485 return cpu_cap >= min(min_cap, max_cap);
486 }
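/*
 * Hypothetical numbers to illustrate the check above (capacities on the
 * 0..1024 scale): a task with uclamp_min = 512 fits a mid CPU with
 * capacity_orig = 768 but not a little CPU with 400; if its uclamp_max
 * were lowered to 300, min(512, 300) = 300 and the little CPU would be
 * considered a fit again.
 */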
487 #else
488 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
489 {
490 return true;
491 }
492 #endif
493
494 #ifdef CONFIG_RT_GROUP_SCHED
495
496 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
497 {
498 if (!rt_rq->tg)
499 return RUNTIME_INF;
500
501 return rt_rq->rt_runtime;
502 }
503
504 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
505 {
506 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
507 }
508
509 typedef struct task_group *rt_rq_iter_t;
510
511 static inline struct task_group *next_task_group(struct task_group *tg)
512 {
513 do {
514 tg = list_entry_rcu(tg->list.next,
515 typeof(struct task_group), list);
516 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
517
518 if (&tg->list == &task_groups)
519 tg = NULL;
520
521 return tg;
522 }
523
524 #define for_each_rt_rq(rt_rq, iter, rq) \
525 for (iter = container_of(&task_groups, typeof(*iter), list); \
526 (iter = next_task_group(iter)) && \
527 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
528
529 #define for_each_sched_rt_entity(rt_se) \
530 for (; rt_se; rt_se = rt_se->parent)
531
532 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
533 {
534 return rt_se->my_q;
535 }
536
537 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
538 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
539
540 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
541 {
542 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
543 struct rq *rq = rq_of_rt_rq(rt_rq);
544 struct sched_rt_entity *rt_se;
545
546 int cpu = cpu_of(rq);
547
548 rt_se = rt_rq->tg->rt_se[cpu];
549
550 if (rt_rq->rt_nr_running) {
551 if (!rt_se)
552 enqueue_top_rt_rq(rt_rq);
553 else if (!on_rt_rq(rt_se))
554 enqueue_rt_entity(rt_se, 0);
555
556 if (rt_rq->highest_prio.curr < curr->prio)
557 resched_curr(rq);
558 }
559 }
560
561 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
562 {
563 struct sched_rt_entity *rt_se;
564 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
565
566 rt_se = rt_rq->tg->rt_se[cpu];
567
568 if (!rt_se) {
569 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
570 /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
571 cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
572 }
573 else if (on_rt_rq(rt_se))
574 dequeue_rt_entity(rt_se, 0);
575 }
576
577 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
578 {
579 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
580 }
581
582 static int rt_se_boosted(struct sched_rt_entity *rt_se)
583 {
584 struct rt_rq *rt_rq = group_rt_rq(rt_se);
585 struct task_struct *p;
586
587 if (rt_rq)
588 return !!rt_rq->rt_nr_boosted;
589
590 p = rt_task_of(rt_se);
591 return p->prio != p->normal_prio;
592 }
593
594 #ifdef CONFIG_SMP
595 static inline const struct cpumask *sched_rt_period_mask(void)
596 {
597 return this_rq()->rd->span;
598 }
599 #else
600 static inline const struct cpumask *sched_rt_period_mask(void)
601 {
602 return cpu_online_mask;
603 }
604 #endif
605
606 static inline
607 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
608 {
609 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
610 }
611
612 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
613 {
614 return &rt_rq->tg->rt_bandwidth;
615 }
616
617 #else /* !CONFIG_RT_GROUP_SCHED */
618
619 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
620 {
621 return rt_rq->rt_runtime;
622 }
623
624 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
625 {
626 return ktime_to_ns(def_rt_bandwidth.rt_period);
627 }
628
629 typedef struct rt_rq *rt_rq_iter_t;
630
631 #define for_each_rt_rq(rt_rq, iter, rq) \
632 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
633
634 #define for_each_sched_rt_entity(rt_se) \
635 for (; rt_se; rt_se = NULL)
636
637 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
638 {
639 return NULL;
640 }
641
642 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
643 {
644 struct rq *rq = rq_of_rt_rq(rt_rq);
645
646 if (!rt_rq->rt_nr_running)
647 return;
648
649 enqueue_top_rt_rq(rt_rq);
650 resched_curr(rq);
651 }
652
653 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
654 {
655 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
656 }
657
658 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
659 {
660 return rt_rq->rt_throttled;
661 }
662
663 static inline const struct cpumask *sched_rt_period_mask(void)
664 {
665 return cpu_online_mask;
666 }
667
668 static inline
669 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
670 {
671 return &cpu_rq(cpu)->rt;
672 }
673
674 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
675 {
676 return &def_rt_bandwidth;
677 }
678
679 #endif /* CONFIG_RT_GROUP_SCHED */
680
681 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
682 {
683 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
684
685 return (hrtimer_active(&rt_b->rt_period_timer) ||
686 rt_rq->rt_time < rt_b->rt_runtime);
687 }
688
689 #ifdef CONFIG_SMP
690 /*
691 * We ran out of runtime, see if we can borrow some from our neighbours.
692 */
693 static void do_balance_runtime(struct rt_rq *rt_rq)
694 {
695 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
696 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
697 int i, weight;
698 u64 rt_period;
699
700 weight = cpumask_weight(rd->span);
701
702 raw_spin_lock(&rt_b->rt_runtime_lock);
703 rt_period = ktime_to_ns(rt_b->rt_period);
704 for_each_cpu(i, rd->span) {
705 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
706 s64 diff;
707
708 if (iter == rt_rq)
709 continue;
710
711 raw_spin_lock(&iter->rt_runtime_lock);
712 /*
713 * Either all rqs have inf runtime and there's nothing to steal
714 * or __disable_runtime() below sets a specific rq to inf to
715 * indicate it's been disabled and disallow stealing.
716 */
717 if (iter->rt_runtime == RUNTIME_INF)
718 goto next;
719
720 /*
721 * From runqueues with spare time, take 1/n part of their
722 * spare time, but no more than our period.
723 */
724 diff = iter->rt_runtime - iter->rt_time;
725 if (diff > 0) {
726 diff = div_u64((u64)diff, weight);
727 if (rt_rq->rt_runtime + diff > rt_period)
728 diff = rt_period - rt_rq->rt_runtime;
729 iter->rt_runtime -= diff;
730 rt_rq->rt_runtime += diff;
731 if (rt_rq->rt_runtime == rt_period) {
732 raw_spin_unlock(&iter->rt_runtime_lock);
733 break;
734 }
735 }
736 next:
737 raw_spin_unlock(&iter->rt_runtime_lock);
738 }
739 raw_spin_unlock(&rt_b->rt_runtime_lock);
740 }
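/*
 * Hypothetical example of the 1/n borrowing above: in a root domain with
 * weight == 4, a neighbour holding 40 ms of unused runtime donates
 * 40 / 4 = 10 ms per pass, and the borrower stops as soon as its
 * rt_runtime would exceed the period, so no rq is ever granted more than
 * 100% of a period.
 */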
741
742 /*
743 * Ensure this RQ takes back all the runtime it lent to its neighbours.
744 */
745 static void __disable_runtime(struct rq *rq)
746 {
747 struct root_domain *rd = rq->rd;
748 rt_rq_iter_t iter;
749 struct rt_rq *rt_rq;
750
751 if (unlikely(!scheduler_running))
752 return;
753
754 for_each_rt_rq(rt_rq, iter, rq) {
755 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
756 s64 want;
757 int i;
758
759 raw_spin_lock(&rt_b->rt_runtime_lock);
760 raw_spin_lock(&rt_rq->rt_runtime_lock);
761 /*
762 * Either we're all inf and nobody needs to borrow, or we're
763 * already disabled and thus have nothing to do, or we have
764 * exactly the right amount of runtime to take out.
765 */
766 if (rt_rq->rt_runtime == RUNTIME_INF ||
767 rt_rq->rt_runtime == rt_b->rt_runtime)
768 goto balanced;
769 raw_spin_unlock(&rt_rq->rt_runtime_lock);
770
771 /*
772 * Calculate the difference between what we started out with
773 * and what we currently have; that's the amount of runtime
774 * we lent out and now have to reclaim.
775 */
776 want = rt_b->rt_runtime - rt_rq->rt_runtime;
777
778 /*
779 * Greedy reclaim, take back as much as we can.
780 */
781 for_each_cpu(i, rd->span) {
782 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
783 s64 diff;
784
785 /*
786 * Can't reclaim from ourselves or disabled runqueues.
787 */
788 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
789 continue;
790
791 raw_spin_lock(&iter->rt_runtime_lock);
792 if (want > 0) {
793 diff = min_t(s64, iter->rt_runtime, want);
794 iter->rt_runtime -= diff;
795 want -= diff;
796 } else {
797 iter->rt_runtime -= want;
798 want -= want;
799 }
800 raw_spin_unlock(&iter->rt_runtime_lock);
801
802 if (!want)
803 break;
804 }
805
806 raw_spin_lock(&rt_rq->rt_runtime_lock);
807 /*
808 * We cannot be left wanting - that would mean some runtime
809 * leaked out of the system.
810 */
811 BUG_ON(want);
812 balanced:
813 /*
814 * Disable all the borrow logic by pretending we have inf
815 * runtime - in which case borrowing doesn't make sense.
816 */
817 rt_rq->rt_runtime = RUNTIME_INF;
818 rt_rq->rt_throttled = 0;
819 raw_spin_unlock(&rt_rq->rt_runtime_lock);
820 raw_spin_unlock(&rt_b->rt_runtime_lock);
821
822 /* Make rt_rq available for pick_next_task() */
823 sched_rt_rq_enqueue(rt_rq);
824 }
825 }
826
827 static void __enable_runtime(struct rq *rq)
828 {
829 rt_rq_iter_t iter;
830 struct rt_rq *rt_rq;
831
832 if (unlikely(!scheduler_running))
833 return;
834
835 /*
836 * Reset each runqueue's bandwidth settings
837 */
838 for_each_rt_rq(rt_rq, iter, rq) {
839 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
840
841 raw_spin_lock(&rt_b->rt_runtime_lock);
842 raw_spin_lock(&rt_rq->rt_runtime_lock);
843 rt_rq->rt_runtime = rt_b->rt_runtime;
844 rt_rq->rt_time = 0;
845 rt_rq->rt_throttled = 0;
846 raw_spin_unlock(&rt_rq->rt_runtime_lock);
847 raw_spin_unlock(&rt_b->rt_runtime_lock);
848 }
849 }
850
851 static void balance_runtime(struct rt_rq *rt_rq)
852 {
853 if (!sched_feat(RT_RUNTIME_SHARE))
854 return;
855
856 if (rt_rq->rt_time > rt_rq->rt_runtime) {
857 raw_spin_unlock(&rt_rq->rt_runtime_lock);
858 do_balance_runtime(rt_rq);
859 raw_spin_lock(&rt_rq->rt_runtime_lock);
860 }
861 }
862 #else /* !CONFIG_SMP */
863 static inline void balance_runtime(struct rt_rq *rt_rq) {}
864 #endif /* CONFIG_SMP */
865
866 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
867 {
868 int i, idle = 1, throttled = 0;
869 const struct cpumask *span;
870
871 span = sched_rt_period_mask();
872 #ifdef CONFIG_RT_GROUP_SCHED
873 /*
874 * When the tasks in the task_group run on either isolated
875 * or non-isolated CPUs, whether they were isolated via
876 * isolcpus or via cpusets, check all the online rt_rqs,
877 * lest the timer run on a CPU which does not service
878 * all runqueues, potentially leaving other CPUs indefinitely
879 * throttled.
880 */
881 span = cpu_online_mask;
882 #endif
883 for_each_cpu(i, span) {
884 int enqueue = 0;
885 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
886 struct rq *rq = rq_of_rt_rq(rt_rq);
887 int skip;
888
889 /*
890 * When span == cpu_online_mask, taking each rq->lock
891 * can be time-consuming. Try to avoid it when possible.
892 */
893 raw_spin_lock(&rt_rq->rt_runtime_lock);
894 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
895 rt_rq->rt_runtime = rt_b->rt_runtime;
896 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
897 raw_spin_unlock(&rt_rq->rt_runtime_lock);
898 if (skip)
899 continue;
900
901 raw_spin_lock(&rq->lock);
902 update_rq_clock(rq);
903
904 if (rt_rq->rt_time) {
905 u64 runtime;
906
907 raw_spin_lock(&rt_rq->rt_runtime_lock);
908 if (rt_rq->rt_throttled)
909 balance_runtime(rt_rq);
910 runtime = rt_rq->rt_runtime;
911 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
912 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
913 rt_rq->rt_throttled = 0;
914 enqueue = 1;
915
916 /*
917 * When we're idle and a woken (rt) task is
918 * throttled check_preempt_curr() will set
919 * skip_update and the time between the wakeup
920 * and this unthrottle will get accounted as
921 * 'runtime'.
922 */
923 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
924 rq_clock_cancel_skipupdate(rq);
925 }
926 if (rt_rq->rt_time || rt_rq->rt_nr_running)
927 idle = 0;
928 raw_spin_unlock(&rt_rq->rt_runtime_lock);
929 } else if (rt_rq->rt_nr_running) {
930 idle = 0;
931 if (!rt_rq_throttled(rt_rq))
932 enqueue = 1;
933 }
934 if (rt_rq->rt_throttled)
935 throttled = 1;
936
937 if (enqueue)
938 sched_rt_rq_enqueue(rt_rq);
939 raw_spin_unlock(&rq->lock);
940 }
941
942 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
943 return 1;
944
945 return idle;
946 }
947
948 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
949 {
950 #ifdef CONFIG_RT_GROUP_SCHED
951 struct rt_rq *rt_rq = group_rt_rq(rt_se);
952
953 if (rt_rq)
954 return rt_rq->highest_prio.curr;
955 #endif
956
957 return rt_task_of(rt_se)->prio;
958 }
959
960 static inline void try_start_rt_bandwidth(struct rt_bandwidth *rt_b)
961 {
962 raw_spin_lock(&rt_b->rt_runtime_lock);
963 if (!rt_b->rt_period_active) {
964 rt_b->rt_period_active = 1;
965 hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period);
966 hrtimer_start_expires(&rt_b->rt_period_timer,
967 HRTIMER_MODE_ABS_PINNED_HARD);
968 }
969 raw_spin_unlock(&rt_b->rt_runtime_lock);
970 }
971
972 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
973 {
974 u64 runtime = sched_rt_runtime(rt_rq);
975
976 if (rt_rq->rt_throttled)
977 return rt_rq_throttled(rt_rq);
978
979 if (runtime >= sched_rt_period(rt_rq))
980 return 0;
981
982 balance_runtime(rt_rq);
983 runtime = sched_rt_runtime(rt_rq);
984 if (runtime == RUNTIME_INF)
985 return 0;
986
987 if (rt_rq->rt_time > runtime) {
988 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
989
990 /*
991 * Don't actually throttle groups that have no runtime assigned
992 * but accrue some time due to boosting.
993 */
994 if (likely(rt_b->rt_runtime)) {
995 rt_rq->rt_throttled = 1;
996 printk_deferred_once("sched: RT throttling activated\n");
997 } else {
998 /*
999 * In case we did anyway, make it go away,
1000 * replenishment is a joke, since it will replenish us
1001 * with exactly 0 ns.
1002 */
1003 rt_rq->rt_time = 0;
1004 }
1005
1006 if (rt_rq_throttled(rt_rq)) {
1007 sched_rt_rq_dequeue(rt_rq);
1008 return 1;
1009 }
1010 }
1011
1012 return 0;
1013 }
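/*
 * With the default bandwidth settings (sched_rt_runtime_us = 950000 and
 * sched_rt_period_us = 1000000, both tunable under /proc/sys/kernel/), a
 * spinning RT task hits this path after consuming 950 ms of each 1 s
 * period and is throttled for the remaining 50 ms, which is what keeps a
 * runaway SCHED_FIFO task from locking out all non-RT work.
 */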
1014
1015 /*
1016 * Update the current task's runtime statistics. Skip current tasks that
1017 * are not in our scheduling class.
1018 */
1019 static void update_curr_rt(struct rq *rq)
1020 {
1021 struct task_struct *curr = rq->curr;
1022 struct sched_rt_entity *rt_se = &curr->rt;
1023 u64 delta_exec;
1024 u64 now;
1025
1026 if (curr->sched_class != &rt_sched_class)
1027 return;
1028
1029 now = rq_clock_task(rq);
1030 delta_exec = now - curr->se.exec_start;
1031 if (unlikely((s64)delta_exec <= 0))
1032 return;
1033
1034 schedstat_set(curr->se.statistics.exec_max,
1035 max(curr->se.statistics.exec_max, delta_exec));
1036
1037 curr->se.sum_exec_runtime += delta_exec;
1038 account_group_exec_runtime(curr, delta_exec);
1039
1040 curr->se.exec_start = now;
1041 cgroup_account_cputime(curr, delta_exec);
1042
1043 if (!rt_bandwidth_enabled())
1044 return;
1045
1046 for_each_sched_rt_entity(rt_se) {
1047 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1048 int exceeded;
1049
1050 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
1051 raw_spin_lock(&rt_rq->rt_runtime_lock);
1052 rt_rq->rt_time += delta_exec;
1053 exceeded = sched_rt_runtime_exceeded(rt_rq);
1054 if (exceeded)
1055 resched_curr(rq);
1056 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1057 if (exceeded)
1058 try_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
1059 }
1060 }
1061 }
1062
1063 static void
1064 dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
1065 {
1066 struct rq *rq = rq_of_rt_rq(rt_rq);
1067
1068 BUG_ON(&rq->rt != rt_rq);
1069
1070 if (!rt_rq->rt_queued)
1071 return;
1072
1073 BUG_ON(!rq->nr_running);
1074
1075 sub_nr_running(rq, count);
1076 rt_rq->rt_queued = 0;
1077
1078 }
1079
1080 static void
1081 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1082 {
1083 struct rq *rq = rq_of_rt_rq(rt_rq);
1084
1085 BUG_ON(&rq->rt != rt_rq);
1086
1087 if (rt_rq->rt_queued)
1088 return;
1089
1090 if (rt_rq_throttled(rt_rq))
1091 return;
1092
1093 if (rt_rq->rt_nr_running) {
1094 add_nr_running(rq, rt_rq->rt_nr_running);
1095 rt_rq->rt_queued = 1;
1096 }
1097
1098 /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
1099 cpufreq_update_util(rq, 0);
1100 }
1101
1102 #if defined CONFIG_SMP
1103
1104 static void
1105 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1106 {
1107 struct rq *rq = rq_of_rt_rq(rt_rq);
1108
1109 #ifdef CONFIG_RT_GROUP_SCHED
1110 /*
1111 * Change rq's cpupri only if rt_rq is the top queue.
1112 */
1113 if (&rq->rt != rt_rq)
1114 return;
1115 #endif
1116 if (rq->online && prio < prev_prio)
1117 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1118 }
1119
1120 static void
1121 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1122 {
1123 struct rq *rq = rq_of_rt_rq(rt_rq);
1124
1125 #ifdef CONFIG_RT_GROUP_SCHED
1126 /*
1127 * Change rq's cpupri only if rt_rq is the top queue.
1128 */
1129 if (&rq->rt != rt_rq)
1130 return;
1131 #endif
1132 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1133 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1134 }
1135
1136 #else /* CONFIG_SMP */
1137
1138 static inline
1139 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1140 static inline
1141 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1142
1143 #endif /* CONFIG_SMP */
1144
1145 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1146 static void
1147 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1148 {
1149 int prev_prio = rt_rq->highest_prio.curr;
1150
1151 if (prio < prev_prio)
1152 rt_rq->highest_prio.curr = prio;
1153
1154 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1155 }
1156
1157 static void
1158 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1159 {
1160 int prev_prio = rt_rq->highest_prio.curr;
1161
1162 if (rt_rq->rt_nr_running) {
1163
1164 WARN_ON(prio < prev_prio);
1165
1166 /*
1167 * This may have been our highest task, and therefore
1168 * we may have some recomputation to do
1169 */
1170 if (prio == prev_prio) {
1171 struct rt_prio_array *array = &rt_rq->active;
1172
1173 rt_rq->highest_prio.curr =
1174 sched_find_first_bit(array->bitmap);
1175 }
1176
1177 } else
1178 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1179
1180 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1181 }
1182
1183 #else
1184
1185 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1186 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1187
1188 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1189
1190 #ifdef CONFIG_RT_GROUP_SCHED
1191
1192 static void
1193 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1194 {
1195 if (rt_se_boosted(rt_se))
1196 rt_rq->rt_nr_boosted++;
1197
1198 if (rt_rq->tg)
1199 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1200 }
1201
1202 static void
1203 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1204 {
1205 if (rt_se_boosted(rt_se))
1206 rt_rq->rt_nr_boosted--;
1207
1208 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1209 }
1210
1211 #else /* CONFIG_RT_GROUP_SCHED */
1212
1213 static void
1214 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1215 {
1216 start_rt_bandwidth(&def_rt_bandwidth);
1217 }
1218
1219 static inline
1220 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1221
1222 #endif /* CONFIG_RT_GROUP_SCHED */
1223
1224 static inline
1225 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1226 {
1227 struct rt_rq *group_rq = group_rt_rq(rt_se);
1228
1229 if (group_rq)
1230 return group_rq->rt_nr_running;
1231 else
1232 return 1;
1233 }
1234
1235 static inline
1236 unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1237 {
1238 struct rt_rq *group_rq = group_rt_rq(rt_se);
1239 struct task_struct *tsk;
1240
1241 if (group_rq)
1242 return group_rq->rr_nr_running;
1243
1244 tsk = rt_task_of(rt_se);
1245
1246 return (tsk->policy == SCHED_RR) ? 1 : 0;
1247 }
1248
1249 static inline
1250 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1251 {
1252 int prio = rt_se_prio(rt_se);
1253
1254 WARN_ON(!rt_prio(prio));
1255 rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1256 rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1257
1258 inc_rt_prio(rt_rq, prio);
1259 inc_rt_migration(rt_se, rt_rq);
1260 inc_rt_group(rt_se, rt_rq);
1261 }
1262
1263 static inline
1264 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1265 {
1266 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1267 WARN_ON(!rt_rq->rt_nr_running);
1268 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1269 rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1270
1271 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1272 dec_rt_migration(rt_se, rt_rq);
1273 dec_rt_group(rt_se, rt_rq);
1274 }
1275
1276 /*
1277 * Change rt_se->run_list location unless SAVE && !MOVE
1278 *
1279 * assumes ENQUEUE/DEQUEUE flags match
1280 */
1281 static inline bool move_entity(unsigned int flags)
1282 {
1283 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1284 return false;
1285
1286 return true;
1287 }
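/*
 * SAVE without MOVE typically happens when an attribute change leaves the
 * effective priority untouched (see how __sched_setscheduler() clears
 * DEQUEUE_MOVE in that case): the entity is dequeued and re-enqueued for
 * accounting, but keeps its position in the priority list.
 */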
1288
1289 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1290 {
1291 list_del_init(&rt_se->run_list);
1292
1293 if (list_empty(array->queue + rt_se_prio(rt_se)))
1294 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1295
1296 rt_se->on_list = 0;
1297 }
1298
1299 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1300 {
1301 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1302 struct rt_prio_array *array = &rt_rq->active;
1303 struct rt_rq *group_rq = group_rt_rq(rt_se);
1304 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1305
1306 /*
1307 * Don't enqueue the group if it's throttled, or when empty.
1308 * The latter is a consequence of the former when a child group
1309 * gets throttled and the current group doesn't have any other
1310 * active members.
1311 */
1312 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1313 if (rt_se->on_list)
1314 __delist_rt_entity(rt_se, array);
1315 return;
1316 }
1317
1318 if (move_entity(flags)) {
1319 WARN_ON_ONCE(rt_se->on_list);
1320 if (flags & ENQUEUE_HEAD)
1321 list_add(&rt_se->run_list, queue);
1322 else
1323 list_add_tail(&rt_se->run_list, queue);
1324
1325 __set_bit(rt_se_prio(rt_se), array->bitmap);
1326 rt_se->on_list = 1;
1327 }
1328 rt_se->on_rq = 1;
1329
1330 inc_rt_tasks(rt_se, rt_rq);
1331 }
1332
1333 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1334 {
1335 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1336 struct rt_prio_array *array = &rt_rq->active;
1337
1338 if (move_entity(flags)) {
1339 WARN_ON_ONCE(!rt_se->on_list);
1340 __delist_rt_entity(rt_se, array);
1341 }
1342 rt_se->on_rq = 0;
1343
1344 dec_rt_tasks(rt_se, rt_rq);
1345 }
1346
1347 /*
1348 * Because the prio of an upper entry depends on the lower
1349 * entries, we must remove entries top - down.
1350 */
1351 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1352 {
1353 struct sched_rt_entity *back = NULL;
1354 unsigned int rt_nr_running;
1355
1356 for_each_sched_rt_entity(rt_se) {
1357 rt_se->back = back;
1358 back = rt_se;
1359 }
1360
1361 rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
1362
1363 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1364 if (on_rt_rq(rt_se))
1365 __dequeue_rt_entity(rt_se, flags);
1366 }
1367
1368 dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
1369 }
1370
1371 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1372 {
1373 struct rq *rq = rq_of_rt_se(rt_se);
1374
1375 dequeue_rt_stack(rt_se, flags);
1376 for_each_sched_rt_entity(rt_se)
1377 __enqueue_rt_entity(rt_se, flags);
1378 enqueue_top_rt_rq(&rq->rt);
1379 }
1380
1381 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1382 {
1383 struct rq *rq = rq_of_rt_se(rt_se);
1384
1385 dequeue_rt_stack(rt_se, flags);
1386
1387 for_each_sched_rt_entity(rt_se) {
1388 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1389
1390 if (rt_rq && rt_rq->rt_nr_running)
1391 __enqueue_rt_entity(rt_se, flags);
1392 }
1393 enqueue_top_rt_rq(&rq->rt);
1394 }
1395
1396 /*
1397 * Adding/removing a task to/from a priority array:
1398 */
1399 static void
1400 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1401 {
1402 struct sched_rt_entity *rt_se = &p->rt;
1403
1404 if (flags & ENQUEUE_WAKEUP)
1405 rt_se->timeout = 0;
1406
1407 enqueue_rt_entity(rt_se, flags);
1408 walt_inc_cumulative_runnable_avg(rq, p);
1409
1410 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1411 enqueue_pushable_task(rq, p);
1412 }
1413
1414 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1415 {
1416 struct sched_rt_entity *rt_se = &p->rt;
1417
1418 update_curr_rt(rq);
1419 dequeue_rt_entity(rt_se, flags);
1420 walt_dec_cumulative_runnable_avg(rq, p);
1421
1422 dequeue_pushable_task(rq, p);
1423 }
1424
1425 /*
1426 * Put task to the head or the end of the run list without the overhead of
1427 * dequeue followed by enqueue.
1428 */
1429 static void
1430 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1431 {
1432 if (on_rt_rq(rt_se)) {
1433 struct rt_prio_array *array = &rt_rq->active;
1434 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1435
1436 if (head)
1437 list_move(&rt_se->run_list, queue);
1438 else
1439 list_move_tail(&rt_se->run_list, queue);
1440 }
1441 }
1442
1443 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1444 {
1445 struct sched_rt_entity *rt_se = &p->rt;
1446 struct rt_rq *rt_rq;
1447
1448 for_each_sched_rt_entity(rt_se) {
1449 rt_rq = rt_rq_of_se(rt_se);
1450 requeue_rt_entity(rt_rq, rt_se, head);
1451 }
1452 }
1453
1454 static void yield_task_rt(struct rq *rq)
1455 {
1456 requeue_task_rt(rq, rq->curr, 0);
1457 }
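/*
 * Note: for SCHED_FIFO/SCHED_RR, sched_yield() therefore only moves the
 * caller to the tail of its own priority list; it yields to equal-priority
 * tasks but never to lower-priority ones, so a lone highest-priority
 * spinner calling sched_yield() in a loop keeps running.
 */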
1458
1459 #ifdef CONFIG_SMP
1460 static int find_lowest_rq(struct task_struct *task);
1461
1462 static int
1463 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1464 {
1465 struct task_struct *curr;
1466 struct rq *rq;
1467 bool test;
1468
1469 /* For anything but wake ups, just return the task_cpu */
1470 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1471 goto out;
1472
1473 rq = cpu_rq(cpu);
1474
1475 rcu_read_lock();
1476 curr = READ_ONCE(rq->curr); /* unlocked access */
1477
1478 /*
1479 * If the current task on @p's runqueue is an RT task, then
1480 * try to see if we can wake this RT task up on another
1481 * runqueue. Otherwise simply start this RT task
1482 * on its current runqueue.
1483 *
1484 * We want to avoid overloading runqueues. If the woken
1485 * task is a higher priority, then it will stay on this CPU
1486 * and the lower prio task should be moved to another CPU.
1487 * Even though this will probably make the lower prio task
1488 * lose its cache, we do not want to bounce a higher task
1489 * around just because it gave up its CPU, perhaps for a
1490 * lock?
1491 *
1492 * For equal prio tasks, we just let the scheduler sort it out.
1493 *
1494 * Otherwise, just let it ride on the affined RQ and the
1495 * post-schedule router will push the preempted task away
1496 *
1497 * This test is optimistic, if we get it wrong the load-balancer
1498 * will have to sort it out.
1499 *
1500 * We take into account the capacity of the CPU to ensure it fits the
1501 * requirement of the task - which is only important on heterogeneous
1502 * systems like big.LITTLE.
1503 */
1504 test = curr &&
1505 unlikely(rt_task(curr)) &&
1506 (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
1507 #ifdef CONFIG_SCHED_RT_CAS
1508 test |= sysctl_sched_enable_rt_cas;
1509 #endif
1510
1511 if (test || !rt_task_fits_capacity(p, cpu)) {
1512 int target = find_lowest_rq(p);
1513
1514 /*
1515 * Bail out if we were forcing a migration to find a better
1516 * fitting CPU but our search failed.
1517 */
1518 if (!test && target != -1 && !rt_task_fits_capacity(p, target))
1519 goto out_unlock;
1520
1521 /*
1522 * Don't bother moving it if the destination CPU is
1523 * not running a lower priority task.
1524 */
1525 if (target != -1 && (
1526 #ifdef CONFIG_SCHED_RT_CAS
1527 sysctl_sched_enable_rt_cas ||
1528 #endif
1529 p->prio < cpu_rq(target)->rt.highest_prio.curr))
1530 cpu = target;
1531 }
1532
1533 out_unlock:
1534 rcu_read_unlock();
1535
1536 out:
1537 return cpu;
1538 }
1539
1540 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1541 {
1542 /*
1543 * Current can't be migrated, useless to reschedule,
1544 * let's hope p can move out.
1545 */
1546 if (rq->curr->nr_cpus_allowed == 1 ||
1547 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1548 return;
1549
1550 /*
1551 * p is migratable, so let's not schedule it and
1552 * see if it is pushed or pulled somewhere else.
1553 */
1554 if (p->nr_cpus_allowed != 1 &&
1555 cpupri_find(&rq->rd->cpupri, p, NULL))
1556 return;
1557
1558 /*
1559 * There appear to be other CPUs that can accept
1560 * the current task but none can run 'p', so let's reschedule
1561 * to try and push the current task away:
1562 */
1563 requeue_task_rt(rq, p, 1);
1564 resched_curr(rq);
1565 }
1566
1567 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1568 {
1569 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1570 /*
1571 * This is OK, because current is on_cpu, which avoids it being
1572 * picked for load-balance and preemption/IRQs are still
1573 * disabled avoiding further scheduler activity on it and we've
1574 * not yet started the picking loop.
1575 */
1576 rq_unpin_lock(rq, rf);
1577 pull_rt_task(rq);
1578 rq_repin_lock(rq, rf);
1579 }
1580
1581 return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
1582 }
1583 #endif /* CONFIG_SMP */
1584
1585 /*
1586 * Preempt the current task with a newly woken task if needed:
1587 */
1588 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1589 {
1590 if (p->prio < rq->curr->prio) {
1591 resched_curr(rq);
1592 return;
1593 }
1594
1595 #ifdef CONFIG_SMP
1596 /*
1597 * If:
1598 *
1599 * - the newly woken task is of equal priority to the current task
1600 * - the newly woken task is non-migratable while current is migratable
1601 * - current will be preempted on the next reschedule
1602 *
1603 * we should check to see if current can readily move to a different
1604 * cpu. If so, we will reschedule to allow the push logic to try
1605 * to move current somewhere else, making room for our non-migratable
1606 * task.
1607 */
1608 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1609 check_preempt_equal_prio(rq, p);
1610 #endif
1611 }
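/*
 * The strict '<' comparison above (lower prio value == higher priority)
 * means a waking task preempts current only when it is strictly
 * higher-priority; an equal-priority wakeup never preempts directly and is
 * instead left to the SMP push logic via check_preempt_equal_prio().
 */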
1612
1613 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
1614 {
1615 p->se.exec_start = rq_clock_task(rq);
1616
1617 /* The running task is never eligible for pushing */
1618 dequeue_pushable_task(rq, p);
1619
1620 if (!first)
1621 return;
1622
1623 /*
1624 * If prev task was rt, put_prev_task() has already updated the
1625 * utilization. We only care of the case where we start to schedule a
1626 * rt task
1627 */
1628 if (rq->curr->sched_class != &rt_sched_class)
1629 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1630
1631 rt_queue_push_tasks(rq);
1632 }
1633
1634 static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
1635 {
1636 struct rt_prio_array *array = &rt_rq->active;
1637 struct sched_rt_entity *next = NULL;
1638 struct list_head *queue;
1639 int idx;
1640
1641 idx = sched_find_first_bit(array->bitmap);
1642 BUG_ON(idx >= MAX_RT_PRIO);
1643
1644 queue = array->queue + idx;
1645 if (SCHED_WARN_ON(list_empty(queue)))
1646 return NULL;
1647 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1648
1649 return next;
1650 }
1651
1652 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1653 {
1654 struct sched_rt_entity *rt_se;
1655 struct rt_rq *rt_rq = &rq->rt;
1656
1657 do {
1658 rt_se = pick_next_rt_entity(rt_rq);
1659 if (unlikely(!rt_se))
1660 return NULL;
1661 rt_rq = group_rt_rq(rt_se);
1662 } while (rt_rq);
1663
1664 return rt_task_of(rt_se);
1665 }
1666
1667 static struct task_struct *pick_next_task_rt(struct rq *rq)
1668 {
1669 struct task_struct *p;
1670
1671 if (!sched_rt_runnable(rq))
1672 return NULL;
1673
1674 p = _pick_next_task_rt(rq);
1675 set_next_task_rt(rq, p, true);
1676 return p;
1677 }
1678
1679 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1680 {
1681 update_curr_rt(rq);
1682
1683 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1684
1685 /*
1686 * The previous task needs to be made eligible for pushing
1687 * if it is still active
1688 */
1689 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1690 enqueue_pushable_task(rq, p);
1691 }
1692
1693 #ifdef CONFIG_SMP
1694
1695 /* Only try algorithms three times */
1696 #define RT_MAX_TRIES 3
1697
1698 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1699 {
1700 if (!task_running(rq, p) &&
1701 cpumask_test_cpu(cpu, p->cpus_ptr))
1702 return 1;
1703
1704 return 0;
1705 }
1706
1707 /*
1708 * Return the highest-priority pushable task on this rq that is suitable
1709 * to be executed on the CPU, or NULL if there is none.
1710 */
1711 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1712 {
1713 struct plist_head *head = &rq->rt.pushable_tasks;
1714 struct task_struct *p;
1715
1716 if (!has_pushable_tasks(rq))
1717 return NULL;
1718
1719 plist_for_each_entry(p, head, pushable_tasks) {
1720 if (pick_rt_task(rq, p, cpu))
1721 return p;
1722 }
1723
1724 return NULL;
1725 }
1726
1727 #ifdef CONFIG_SCHED_RT_CAS
1728 static int find_cas_cpu(struct sched_domain *sd,
1729 struct task_struct *task, struct cpumask *lowest_mask)
1730 {
1731 struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
1732 struct sched_group *sg = NULL;
1733 struct sched_group *sg_target = NULL;
1734 struct sched_group *sg_backup = NULL;
1735 struct cpumask search_cpu, backup_search_cpu;
1736 int cpu = -1;
1737 int target_cpu = -1;
1738 unsigned long cpu_capacity;
1739 unsigned long boosted_tutil = uclamp_task_util(task, uclamp_eff_value(task, UCLAMP_MIN), uclamp_eff_value(task, UCLAMP_MAX));
1740 unsigned long target_capacity = ULONG_MAX;
1741 unsigned long util;
1742 unsigned long target_cpu_util = ULONG_MAX;
1743 int prev_cpu = task_cpu(task);
1744 #ifdef CONFIG_SCHED_RTG
1745 struct cpumask *rtg_target = NULL;
1746 #endif
1747 bool boosted = uclamp_boosted(task);
1748
1749 if (!sysctl_sched_enable_rt_cas)
1750 return -1;
1751
1752 rcu_read_lock();
1753
1754 #ifdef CONFIG_SCHED_RTG
1755 rtg_target = find_rtg_target(task);
1756 #endif
1757
1758 sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, 0));
1759 if (!sd) {
1760 rcu_read_unlock();
1761 return -1;
1762 }
1763
1764 sg = sd->groups;
1765 do {
1766 if (!cpumask_intersects(lowest_mask, sched_group_span(sg)))
1767 continue;
1768
1769 if (boosted) {
1770 if (cpumask_test_cpu(rd->max_cap_orig_cpu,
1771 sched_group_span(sg))) {
1772 sg_target = sg;
1773 break;
1774 }
1775 }
1776
1777 cpu = group_first_cpu(sg);
1778 #ifdef CONFIG_SCHED_RTG
1779 /* honor the rtg tasks */
1780 if (rtg_target) {
1781 if (cpumask_test_cpu(cpu, rtg_target)) {
1782 sg_target = sg;
1783 break;
1784 }
1785
1786 /* active LB or big_task favor cpus with more capacity */
1787 if (task->state == TASK_RUNNING || boosted) {
1788 if (capacity_orig_of(cpu) >
1789 capacity_orig_of(cpumask_any(rtg_target))) {
1790 sg_target = sg;
1791 break;
1792 }
1793
1794 sg_backup = sg;
1795 continue;
1796 }
1797 }
1798 #endif
1799 /*
1800 * 1. add margin to support task migration
1801 * 2. if task_util is higher than every cpu's capacity, make sure the
1802 * sg_backup with the most powerful cpus is selected
1803 */
1804 if (!rt_task_fits_capacity(task, cpu)) {
1805 sg_backup = sg;
1806 continue;
1807 }
1808
1809 /* support task boost */
1810 cpu_capacity = capacity_orig_of(cpu);
1811 if (boosted_tutil > cpu_capacity) {
1812 sg_backup = sg;
1813 continue;
1814 }
1815
1816 /* sg_target: select the sg with smaller capacity */
1817 if (cpu_capacity < target_capacity) {
1818 target_capacity = cpu_capacity;
1819 sg_target = sg;
1820 }
1821 } while (sg = sg->next, sg != sd->groups);
1822
1823 if (!sg_target)
1824 sg_target = sg_backup;
1825
1826 if (sg_target) {
1827 cpumask_and(&search_cpu, lowest_mask, sched_group_span(sg_target));
1828 cpumask_copy(&backup_search_cpu, lowest_mask);
1829 cpumask_andnot(&backup_search_cpu, &backup_search_cpu, &search_cpu);
1830 } else {
1831 cpumask_copy(&search_cpu, lowest_mask);
1832 cpumask_clear(&backup_search_cpu);
1833 }
1834
1835 retry:
1836 cpu = cpumask_first(&search_cpu);
1837 do {
1838 trace_sched_find_cas_cpu_each(task, cpu, target_cpu,
1839 cpu_isolated(cpu),
1840 idle_cpu(cpu), boosted_tutil, cpu_util(cpu),
1841 capacity_orig_of(cpu));
1842
1843 if (cpu_isolated(cpu))
1844 continue;
1845
1846 if (!cpumask_test_cpu(cpu, task->cpus_ptr))
1847 continue;
1848
1849 /* find best cpu with smallest max_capacity */
1850 if (target_cpu != -1 &&
1851 capacity_orig_of(cpu) > capacity_orig_of(target_cpu))
1852 continue;
1853
1854 util = cpu_util(cpu);
1855
1856 /* Find the least loaded CPU */
1857 if (util > target_cpu_util)
1858 continue;
1859
1860 /*
1861 * If the previous CPU has the same load, keep it as
1862 * target_cpu
1863 */
1864 if (target_cpu_util == util && target_cpu == prev_cpu)
1865 continue;
1866
1867 /*
1868 * If candidate CPU is the previous CPU, select it.
1869 * If all above conditions are same, select the least
1870 * cumulative window demand CPU.
1871 */
1872 target_cpu_util = util;
1873 target_cpu = cpu;
1874 } while ((cpu = cpumask_next(cpu, &search_cpu)) < nr_cpu_ids);
1875
1876 if (target_cpu != -1 && cpumask_test_cpu(target_cpu, lowest_mask)) {
1877 goto done;
1878 } else if (!cpumask_empty(&backup_search_cpu)) {
1879 cpumask_copy(&search_cpu, &backup_search_cpu);
1880 cpumask_clear(&backup_search_cpu);
1881 goto retry;
1882 }
1883
1884 done:
1885 trace_sched_find_cas_cpu(task, lowest_mask, boosted_tutil, prev_cpu, target_cpu);
1886 rcu_read_unlock();
1887 return target_cpu;
1888 }
1889 #endif
1890
1891 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1892
1893 static int find_lowest_rq(struct task_struct *task)
1894 {
1895 struct sched_domain *sd;
1896 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1897 int this_cpu = smp_processor_id();
1898 int cpu = task_cpu(task);
1899 int ret;
1900 #ifdef CONFIG_SCHED_RT_CAS
1901 int cas_cpu;
1902 #endif
1903
1904 /* Make sure the mask is initialized first */
1905 if (unlikely(!lowest_mask))
1906 return -1;
1907
1908 if (task->nr_cpus_allowed == 1)
1909 return -1; /* No other targets possible */
1910
1911 /*
1912 * If we're on asym system ensure we consider the different capacities
1913 * of the CPUs when searching for the lowest_mask.
1914 */
1915 if (static_branch_unlikely(&sched_asym_cpucapacity)) {
1916
1917 ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
1918 task, lowest_mask,
1919 rt_task_fits_capacity);
1920 } else {
1921
1922 ret = cpupri_find(&task_rq(task)->rd->cpupri,
1923 task, lowest_mask);
1924 }
1925
1926 if (!ret)
1927 return -1; /* No targets found */
1928
1929 #ifdef CONFIG_SCHED_RT_CAS
1930 cas_cpu = find_cas_cpu(sd, task, lowest_mask);
1931 if (cas_cpu != -1)
1932 return cas_cpu;
1933 #endif
1934
1935 /*
1936 * At this point we have built a mask of CPUs representing the
1937 * lowest priority tasks in the system. Now we want to elect
1938 * the best one based on our affinity and topology.
1939 *
1940 * We prioritize the last CPU that the task executed on since
1941 * it is most likely cache-hot in that location.
1942 */
1943 if (cpumask_test_cpu(cpu, lowest_mask))
1944 return cpu;
1945
1946 /*
1947 * Otherwise, we consult the sched_domains span maps to figure
1948 * out which CPU is logically closest to our hot cache data.
1949 */
1950 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1951 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1952
1953 rcu_read_lock();
1954 for_each_domain(cpu, sd) {
1955 if (sd->flags & SD_WAKE_AFFINE) {
1956 int best_cpu;
1957
1958 /*
1959 * "this_cpu" is cheaper to preempt than a
1960 * remote processor.
1961 */
1962 if (this_cpu != -1 &&
1963 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1964 rcu_read_unlock();
1965 return this_cpu;
1966 }
1967
1968 best_cpu = cpumask_first_and(lowest_mask,
1969 sched_domain_span(sd));
1970 if (best_cpu < nr_cpu_ids) {
1971 rcu_read_unlock();
1972 return best_cpu;
1973 }
1974 }
1975 }
1976 rcu_read_unlock();
1977
1978 /*
1979 * And finally, if there were no matches within the domains
1980 * just give the caller *something* to work with from the compatible
1981 * locations.
1982 */
1983 if (this_cpu != -1)
1984 return this_cpu;
1985
1986 cpu = cpumask_any(lowest_mask);
1987 if (cpu < nr_cpu_ids)
1988 return cpu;
1989
1990 return -1;
1991 }
1992
1993 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1994 {
1995 struct task_struct *p;
1996
1997 if (!has_pushable_tasks(rq))
1998 return NULL;
1999
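/*
 * rt.pushable_tasks is a plist sorted by priority, so the first entry is
 * the highest-priority task that is eligible to be pushed away.
 */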
2000 p = plist_first_entry(&rq->rt.pushable_tasks,
2001 struct task_struct, pushable_tasks);
2002
2003 BUG_ON(rq->cpu != task_cpu(p));
2004 BUG_ON(task_current(rq, p));
2005 BUG_ON(p->nr_cpus_allowed <= 1);
2006
2007 BUG_ON(!task_on_rq_queued(p));
2008 BUG_ON(!rt_task(p));
2009
2010 return p;
2011 }
2012
2013 /* Will lock the rq it finds */
2014 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
2015 {
2016 struct rq *lowest_rq = NULL;
2017 int tries;
2018 int cpu;
2019
2020 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
2021 cpu = find_lowest_rq(task);
2022
2023 if ((cpu == -1) || (cpu == rq->cpu))
2024 break;
2025
2026 lowest_rq = cpu_rq(cpu);
2027
2028 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
2029 /*
2030 * Target rq has tasks of equal or higher priority,
2031 * retrying does not release any lock and is unlikely
2032 * to yield a different result.
2033 */
2034 lowest_rq = NULL;
2035 break;
2036 }
2037
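/*
 * double_lock_balance() may need to drop rq->lock in order to take both
 * runqueue locks in a fixed order, so the pushable task has to be
 * re-validated once both locks are held.
 */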
2038 /* if the prio of this runqueue changed, try again */
2039 if (double_lock_balance(rq, lowest_rq)) {
2040 /*
2041 * We had to unlock the run queue. In
2042 * the meantime, the task could have
2043 * migrated already or had its affinity changed.
2044 */
2045 struct task_struct *next_task = pick_next_pushable_task(rq);
2046 if (unlikely(next_task != task ||
2047 !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr))) {
2048 double_unlock_balance(rq, lowest_rq);
2049 lowest_rq = NULL;
2050 break;
2051 }
2052 }
2053
2054 /* If this rq is still suitable use it. */
2055 if (lowest_rq->rt.highest_prio.curr > task->prio)
2056 break;
2057
2058 /* try again */
2059 double_unlock_balance(rq, lowest_rq);
2060 lowest_rq = NULL;
2061 }
2062
2063 return lowest_rq;
2064 }
2065
2066 /*
2067 * If the current CPU has more than one RT task, see if the non
2068 * running task can migrate over to a CPU that is running a task
2069 * of lesser priority.
2070 */
2071 static int push_rt_task(struct rq *rq)
2072 {
2073 struct task_struct *next_task;
2074 struct rq *lowest_rq;
2075 int ret = 0;
2076
2077 if (!rq->rt.overloaded)
2078 return 0;
2079
2080 next_task = pick_next_pushable_task(rq);
2081 if (!next_task)
2082 return 0;
2083
2084 retry:
2085 if (WARN_ON(next_task == rq->curr))
2086 return 0;
2087
2088 /*
2089 * It's possible that the next_task slipped in with a
2090 * higher priority than current. If that's the case
2091 * just reschedule current.
2092 */
2093 if (unlikely(next_task->prio < rq->curr->prio)) {
2094 resched_curr(rq);
2095 return 0;
2096 }
2097
2098 /* We might release rq lock */
2099 get_task_struct(next_task);
2100
2101 /* find_lock_lowest_rq locks the rq if found */
2102 lowest_rq = find_lock_lowest_rq(next_task, rq);
2103 if (!lowest_rq) {
2104 struct task_struct *task;
2105 /*
2106 * find_lock_lowest_rq releases rq->lock
2107 * so it is possible that next_task has migrated.
2108 *
2109 * We need to make sure that the task is still on the same
2110 * run-queue and is also still the next task eligible for
2111 * pushing.
2112 */
2113 task = pick_next_pushable_task(rq);
2114 if (task == next_task) {
2115 /*
2116 * The task hasn't migrated, and is still the next
2117 * eligible task, but we failed to find a run-queue
2118 * to push it to. Do not retry in this case, since
2119 * other CPUs will pull from us when ready.
2120 */
2121 goto out;
2122 }
2123
2124 if (!task)
2125 /* No more tasks, just exit */
2126 goto out;
2127
2128 /*
2129 * Something has shifted, try again.
2130 */
2131 put_task_struct(next_task);
2132 next_task = task;
2133 goto retry;
2134 }
2135
2136 deactivate_task(rq, next_task, 0);
2137 set_task_cpu(next_task, lowest_rq->cpu);
2138 activate_task(lowest_rq, next_task, 0);
2139 ret = 1;
2140
2141 resched_curr(lowest_rq);
2142
2143 double_unlock_balance(rq, lowest_rq);
2144
2145 out:
2146 put_task_struct(next_task);
2147
2148 return ret;
2149 }
2150
2151 static void push_rt_tasks(struct rq *rq)
2152 {
2153 /* push_rt_task() will return true if it moved an RT task */
2154 while (push_rt_task(rq))
2155 ;
2156 }
2157
2158 #ifdef HAVE_RT_PUSH_IPI
2159
2160 /*
2161 * When a high priority task schedules out from a CPU and a lower priority
2162 * task is scheduled in, a check is made to see if there's any RT tasks
2163 * on other CPUs that are waiting to run because a higher priority RT task
2164 * is currently running on its CPU. In this case, the CPU with multiple RT
2165 * tasks queued on it (overloaded) needs to be notified that a CPU has opened
2166 * up that may be able to run one of its non-running queued RT tasks.
2167 *
2168 * All CPUs with overloaded RT tasks need to be notified as there is currently
2169 * no way to know which of these CPUs have the highest priority task waiting
2170 * to run. Instead of trying to take a spinlock on each of these CPUs,
2171 * which has been shown to cause large latency when done on machines with
2172 * many CPUs, an IPI is sent to the CPUs to have them push off the
2173 * overloaded RT tasks waiting to run.
2174 *
2175 * Just sending an IPI to each of the CPUs is also an issue, as on machines
2176 * with a large CPU count this can cause an IPI storm on a CPU, especially
2177 * if it's the only CPU with multiple RT tasks queued, and a large number
2178 * of CPUs scheduling a lower priority task at the same time.
2179 *
2180 * Each root domain has its own irq work function that can iterate over
2181 * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
2182 * tasks must be checked, whether one or many CPUs are lowering
2183 * their priority, there's a single irq work iterator that will try to
2184 * push off RT tasks that are waiting to run.
2185 *
2186 * When a CPU schedules a lower priority task, it will kick off the
2187 * irq work iterator that will jump to each CPU with overloaded RT tasks.
2188 * As it only takes the first CPU that schedules a lower priority task
2189 * to start the process, the rto_start variable is incremented and if
2190 * the atomic result is one, then that CPU will try to take the rto_lock.
2191 * This prevents high contention on the lock as the process handles all
2192 * CPUs scheduling lower priority tasks.
2193 *
2194 * All CPUs that are scheduling a lower priority task will increment the
2195 * rt_loop_next variable. This will make sure that the irq work iterator
2196 * checks all RT overloaded CPUs whenever a CPU schedules a new lower
2197 * priority task, even if the iterator is in the middle of a scan. Incrementing
2198 * the rt_loop_next will cause the iterator to perform another scan.
2199 *
2200 */
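/*
 * Illustrative flow of the code below: a CPU schedules a lower-priority
 * task and calls tell_cpu_to_push(). That bumps rto_loop_next and, if no
 * iteration is currently running, queues rto_push_work on the first CPU
 * in rto_mask. That CPU pushes its waiting RT tasks from hardirq context
 * and then forwards the same irq_work to the next CPU returned by
 * rto_next_cpu(), until rto_next_cpu() runs out of CPUs and rto_loop has
 * caught up with rto_loop_next.
 */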
2201 static int rto_next_cpu(struct root_domain *rd)
2202 {
2203 int next;
2204 int cpu;
2205
2206 /*
2207 * When starting the IPI RT pushing, the rto_cpu is set to -1, so
2208 * rto_next_cpu() will simply return the first CPU found in
2209 * the rto_mask.
2210 *
2211 * If rto_next_cpu() is called with rto_cpu set to a valid CPU, it
2212 * will return the next CPU found in the rto_mask.
2213 *
2214 * If there are no more CPUs left in the rto_mask, then a check is made
2215 * against rto_loop and rto_loop_next. rto_loop is only updated with
2216 * the rto_lock held, but any CPU may increment the rto_loop_next
2217 * without any locking.
2218 */
2219 for (;;) {
2220
2221 /* When rto_cpu is -1 this acts like cpumask_first() */
2222 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
2223
2224 rd->rto_cpu = cpu;
2225
2226 if (cpu < nr_cpu_ids)
2227 return cpu;
2228
2229 rd->rto_cpu = -1;
2230
2231 /*
2232 * ACQUIRE ensures we see the @rto_mask changes
2233 * made prior to the @next value observed.
2234 *
2235 * Matches WMB in rt_set_overload().
2236 */
2237 next = atomic_read_acquire(&rd->rto_loop_next);
2238
2239 if (rd->rto_loop == next)
2240 break;
2241
2242 rd->rto_loop = next;
2243 }
2244
2245 return -1;
2246 }
2247
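/*
 * rto_start_trylock()/rto_start_unlock() implement a simple test-and-set
 * "loop is starting" flag: the acquire cmpxchg lets only one CPU kick off
 * an IPI iteration at a time, and the release store lets the next CPU
 * that lowers its priority start a fresh one.
 */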
2248 static inline bool rto_start_trylock(atomic_t *v)
2249 {
2250 return !atomic_cmpxchg_acquire(v, 0, 1);
2251 }
2252
2253 static inline void rto_start_unlock(atomic_t *v)
2254 {
2255 atomic_set_release(v, 0);
2256 }
2257
2258 static void tell_cpu_to_push(struct rq *rq)
2259 {
2260 int cpu = -1;
2261
2262 /* Keep the loop going if the IPI is currently active */
2263 atomic_inc(&rq->rd->rto_loop_next);
2264
2265 /* Only one CPU can initiate a loop at a time */
2266 if (!rto_start_trylock(&rq->rd->rto_loop_start))
2267 return;
2268
2269 raw_spin_lock(&rq->rd->rto_lock);
2270
2271 /*
2272 * The rto_cpu is updated under the lock; if it has a valid CPU
2273 * then the IPI is still running and will continue due to the
2274 * update to loop_next, and nothing needs to be done here.
2275 * Otherwise it is finishing up and an IPI needs to be sent.
2276 */
2277 if (rq->rd->rto_cpu < 0)
2278 cpu = rto_next_cpu(rq->rd);
2279
2280 raw_spin_unlock(&rq->rd->rto_lock);
2281
2282 rto_start_unlock(&rq->rd->rto_loop_start);
2283
2284 if (cpu >= 0) {
2285 /* Make sure the rd does not get freed while pushing */
2286 sched_get_rd(rq->rd);
2287 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2288 }
2289 }
2290
2291 /* Called from hardirq context */
2292 void rto_push_irq_work_func(struct irq_work *work)
2293 {
2294 struct root_domain *rd =
2295 container_of(work, struct root_domain, rto_push_work);
2296 struct rq *rq;
2297 int cpu;
2298
2299 rq = this_rq();
2300
2301 /*
2302 * We do not need to grab the lock to check for has_pushable_tasks.
2303 * When it gets updated, a check is made to see if a push is possible.
2304 */
2305 if (has_pushable_tasks(rq)) {
2306 raw_spin_lock(&rq->lock);
2307 push_rt_tasks(rq);
2308 raw_spin_unlock(&rq->lock);
2309 }
2310
2311 raw_spin_lock(&rd->rto_lock);
2312
2313 /* Pass the IPI to the next rt overloaded queue */
2314 cpu = rto_next_cpu(rd);
2315
2316 raw_spin_unlock(&rd->rto_lock);
2317
2318 if (cpu < 0) {
2319 sched_put_rd(rd);
2320 return;
2321 }
2322
2323 /* Try the next RT overloaded CPU */
2324 irq_work_queue_on(&rd->rto_push_work, cpu);
2325 }
2326 #endif /* HAVE_RT_PUSH_IPI */
2327
2328 static void pull_rt_task(struct rq *this_rq)
2329 {
2330 int this_cpu = this_rq->cpu, cpu;
2331 bool resched = false;
2332 struct task_struct *p;
2333 struct rq *src_rq;
2334 int rt_overload_count = rt_overloaded(this_rq);
2335
2336 if (likely(!rt_overload_count))
2337 return;
2338
2339 /*
2340 * Match the barrier from rt_set_overload(); this guarantees that if we
2341 * see overloaded we must also see the rto_mask bit.
2342 */
2343 smp_rmb();
2344
2345 /* If we are the only overloaded CPU do nothing */
2346 if (rt_overload_count == 1 &&
2347 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2348 return;
2349
2350 #ifdef HAVE_RT_PUSH_IPI
2351 if (sched_feat(RT_PUSH_IPI)) {
2352 tell_cpu_to_push(this_rq);
2353 return;
2354 }
2355 #endif
2356
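/*
 * Scan every other overloaded CPU. A task is pulled only when it is
 * pushable on its source runqueue, its priority beats this runqueue's
 * current highest RT priority, and it would not preempt the source CPU's
 * currently running task (in which case it is about to run there anyway
 * and is skipped).
 */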
2357 for_each_cpu(cpu, this_rq->rd->rto_mask) {
2358 if (this_cpu == cpu)
2359 continue;
2360
2361 src_rq = cpu_rq(cpu);
2362
2363 /*
2364 * Don't bother taking the src_rq->lock if the next highest
2365 * task is known to be lower-priority than our current task.
2366 * This may look racy, but if this value is about to go
2367 * logically higher, the src_rq will push this task away.
2368 * And if it's going logically lower, we do not care.
2369 */
2370 if (src_rq->rt.highest_prio.next >=
2371 this_rq->rt.highest_prio.curr)
2372 continue;
2373
2374 /*
2375 * We can potentially drop this_rq's lock in
2376 * double_lock_balance, and another CPU could
2377 * alter this_rq
2378 */
2379 double_lock_balance(this_rq, src_rq);
2380
2381 /*
2382 * We can only pull a task that is pushable
2383 * on its rq, and no others.
2384 */
2385 p = pick_highest_pushable_task(src_rq, this_cpu);
2386
2387 /*
2388 * Do we have an RT task that preempts
2389 * the to-be-scheduled task?
2390 */
2391 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2392 WARN_ON(p == src_rq->curr);
2393 WARN_ON(!task_on_rq_queued(p));
2394
2395 /*
2396 * There's a chance that p is higher in priority
2397 * than what's currently running on its CPU.
2398 * This is just that p is waking up and hasn't
2399 * had a chance to schedule. We only pull
2400 * p if it is lower in priority than the
2401 * current task on the run queue.
2402 */
2403 if (p->prio < src_rq->curr->prio)
2404 goto skip;
2405
2406 resched = true;
2407
2408 deactivate_task(src_rq, p, 0);
2409 set_task_cpu(p, this_cpu);
2410 activate_task(this_rq, p, 0);
2411 /*
2412 * We continue with the search, just in
2413 * case there's an even higher prio task
2414 * in another runqueue. (low likelihood
2415 * but possible)
2416 */
2417 }
2418 skip:
2419 double_unlock_balance(this_rq, src_rq);
2420 }
2421
2422 if (resched)
2423 resched_curr(this_rq);
2424 }
2425
2426 /*
2427 * If we are not running and we are not going to reschedule soon, we should
2428 * try to push tasks away now
2429 */
2430 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2431 {
2432 bool need_to_push = !task_running(rq, p) &&
2433 !test_tsk_need_resched(rq->curr) &&
2434 p->nr_cpus_allowed > 1 &&
2435 (dl_task(rq->curr) || rt_task(rq->curr)) &&
2436 (rq->curr->nr_cpus_allowed < 2 ||
2437 rq->curr->prio <= p->prio);
2438
2439 if (need_to_push)
2440 push_rt_tasks(rq);
2441 }
2442
2443 /* Assumes rq->lock is held */
2444 static void rq_online_rt(struct rq *rq)
2445 {
2446 if (rq->rt.overloaded)
2447 rt_set_overload(rq);
2448
2449 __enable_runtime(rq);
2450
2451 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2452 }
2453
2454 /* Assumes rq->lock is held */
2455 static void rq_offline_rt(struct rq *rq)
2456 {
2457 if (rq->rt.overloaded)
2458 rt_clear_overload(rq);
2459
2460 __disable_runtime(rq);
2461
2462 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2463 }
2464
2465 /*
2466 * When switching away from the rt queue, we may end up in a position
2467 * where we want to pull RT tasks from other runqueues.
2468 */
2469 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2470 {
2471 /*
2472 * If there are other RT tasks then we will reschedule
2473 * and the scheduling of the other RT tasks will handle
2474 * the balancing. But if we are the last RT task
2475 * we may need to handle the pulling of RT tasks
2476 * now.
2477 */
2478 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running ||
2479 cpu_isolated(cpu_of(rq)))
2480 return;
2481
2482 rt_queue_pull_task(rq);
2483 }
2484
2485 void __init init_sched_rt_class(void)
2486 {
2487 unsigned int i;
2488
2489 for_each_possible_cpu(i) {
2490 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2491 GFP_KERNEL, cpu_to_node(i));
2492 }
2493 }
2494 #endif /* CONFIG_SMP */
2495
2496 /*
2497 * When switching a task to RT, we may overload the runqueue
2498 * with RT tasks. In this case we try to push them off to
2499 * other runqueues.
2500 */
2501 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2502 {
2503 /*
2504 * If we are running, update the avg_rt tracking, as the running time
2505 * will from now on be accounted to it.
2506 */
2507 if (task_current(rq, p)) {
2508 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2509 return;
2510 }
2511
2512 /*
2513 * If we are not running we may need to preempt the current
2514 * running task. If that current running task is also an RT task
2515 * then see if we can move to another run queue.
2516 */
2517 if (task_on_rq_queued(p)) {
2518 #ifdef CONFIG_SMP
2519 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2520 rt_queue_push_tasks(rq);
2521 #endif /* CONFIG_SMP */
2522 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2523 resched_curr(rq);
2524 }
2525 }
2526
2527 /*
2528 * Priority of the task has changed. This may cause
2529 * us to initiate a push or pull.
2530 */
2531 static void
2532 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2533 {
2534 if (!task_on_rq_queued(p))
2535 return;
2536
2537 if (rq->curr == p) {
2538 #ifdef CONFIG_SMP
2539 /*
2540 * If our priority decreases while running, we
2541 * may need to pull tasks to this runqueue.
2542 */
2543 if (oldprio < p->prio)
2544 rt_queue_pull_task(rq);
2545
2546 /*
2547 * If there's a higher priority task waiting to run
2548 * then reschedule.
2549 */
2550 if (p->prio > rq->rt.highest_prio.curr)
2551 resched_curr(rq);
2552 #else
2553 /* For UP simply resched on drop of prio */
2554 if (oldprio < p->prio)
2555 resched_curr(rq);
2556 #endif /* CONFIG_SMP */
2557 } else {
2558 /*
2559 * This task is not running, but if its priority is
2560 * higher than that of the current running task
2561 * then reschedule.
2562 */
2563 if (p->prio < rq->curr->prio)
2564 resched_curr(rq);
2565 }
2566 }
2567
2568 #ifdef CONFIG_POSIX_TIMERS
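/*
 * RLIMIT_RTTIME watchdog: p->rt.timeout is bumped at most once per jiffy
 * below. As a worked example (assuming HZ=250, a soft RLIMIT_RTTIME of
 * 900000us and a hard limit at least as large), USEC_PER_SEC/HZ is 4000,
 * so the posix CPU-timer watchdog fires once p->rt.timeout exceeds
 * DIV_ROUND_UP(900000, 4000) = 225 ticks.
 */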
2569 static void watchdog(struct rq *rq, struct task_struct *p)
2570 {
2571 unsigned long soft, hard;
2572
2573 /* max may change after cur was read; this will be fixed next tick */
2574 soft = task_rlimit(p, RLIMIT_RTTIME);
2575 hard = task_rlimit_max(p, RLIMIT_RTTIME);
2576
2577 if (soft != RLIM_INFINITY) {
2578 unsigned long next;
2579
2580 if (p->rt.watchdog_stamp != jiffies) {
2581 p->rt.timeout++;
2582 p->rt.watchdog_stamp = jiffies;
2583 }
2584
2585 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2586 if (p->rt.timeout > next) {
2587 posix_cputimers_rt_watchdog(&p->posix_cputimers,
2588 p->se.sum_exec_runtime);
2589 }
2590 }
2591 }
2592 #else
2593 static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2594 #endif
2595
2596 /*
2597 * scheduler tick hitting a task of our scheduling class.
2598 *
2599 * NOTE: This function can be called remotely by the tick offload that
2600 * goes along full dynticks. Therefore no local assumption can be made
2601 * and everything must be accessed through the @rq and @curr passed in
2602 * parameters.
2603 */
2604 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2605 {
2606 struct sched_rt_entity *rt_se = &p->rt;
2607
2608 update_curr_rt(rq);
2609 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2610
2611 watchdog(rq, p);
2612
2613 /*
2614 * RR tasks need a special form of timeslice management.
2615 * FIFO tasks have no timeslices.
2616 */
2617 if (p->policy != SCHED_RR)
2618 return;
2619
2620 if (--p->rt.time_slice)
2621 return;
2622
2623 p->rt.time_slice = sched_rr_timeslice;
2624
2625 /*
2626 * Requeue to the end of queue if we (and all of our ancestors) are not
2627 * the only element on the queue
2628 */
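/*
 * Example: with two SCHED_RR tasks of equal priority on one CPU, each runs
 * for sched_rr_timeslice ticks (typically 100ms unless changed via the
 * sched_rr_timeslice_ms sysctl) and is then requeued behind the other; a
 * lone SCHED_RR task skips the requeue and keeps running.
 */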
2629 for_each_sched_rt_entity(rt_se) {
2630 if (rt_se->run_list.prev != rt_se->run_list.next) {
2631 requeue_task_rt(rq, p, 0);
2632 resched_curr(rq);
2633 return;
2634 }
2635 }
2636 }
2637
2638 #ifdef CONFIG_SCHED_RT_ACTIVE_LB
2639 static int rt_active_load_balance_cpu_stop(void *data)
2640 {
2641 struct rq *busiest_rq = data;
2642 struct task_struct *next_task = busiest_rq->rt_push_task;
2643 struct rq *lowest_rq = NULL;
2644 unsigned long flags;
2645
2646 raw_spin_lock_irqsave(&busiest_rq->lock, flags);
2647 busiest_rq->rt_active_balance = 0;
2648
2649 if (!task_on_rq_queued(next_task) ||
2650 task_cpu(next_task) != cpu_of(busiest_rq))
2651 goto out;
2652
2653 /* find_lock_lowest_rq locks the rq if found */
2654 lowest_rq = find_lock_lowest_rq(next_task, busiest_rq);
2655 if (!lowest_rq)
2656 goto out;
2657
2658 if (capacity_orig_of(cpu_of(lowest_rq)) <= capacity_orig_of(task_cpu(next_task)))
2659 goto unlock;
2660
2661 deactivate_task(busiest_rq, next_task, 0);
2662 set_task_cpu(next_task, lowest_rq->cpu);
2663 activate_task(lowest_rq, next_task, 0);
2664
2665 resched_curr(lowest_rq);
2666 unlock:
2667 double_unlock_balance(busiest_rq, lowest_rq);
2668 out:
2669 put_task_struct(next_task);
2670 raw_spin_unlock_irqrestore(&busiest_rq->lock, flags);
2671
2672 return 0;
2673 }
2674
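/*
 * check_for_migration_rt() and rt_active_load_balance_cpu_stop() form a
 * handshake: the rq->rt_active_balance flag and a reference on
 * rq->rt_push_task are taken under rq->lock, and the stopper then
 * re-validates that the task is still queued on this CPU before moving it
 * to a strictly higher-capacity runqueue found by find_lock_lowest_rq().
 */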
2675 static void check_for_migration_rt(struct rq *rq, struct task_struct *p)
2676 {
2677 bool need_active_lb = false;
2678 bool misfit_task = false;
2679 int cpu = task_cpu(p);
2680 unsigned long cpu_orig_cap;
2681 #ifdef CONFIG_SCHED_RTG
2682 struct cpumask *rtg_target = NULL;
2683 #endif
2684
2685 if (!sysctl_sched_enable_rt_active_lb)
2686 return;
2687
2688 if (p->nr_cpus_allowed == 1)
2689 return;
2690
2691 cpu_orig_cap = capacity_orig_of(cpu);
2692 /* cpu already has the max capacity, no need to balance */
2693 if (cpu_orig_cap == rq->rd->max_cpu_capacity)
2694 return;
2695
2696 #ifdef CONFIG_SCHED_RTG
2697 rtg_target = find_rtg_target(p);
2698 if (rtg_target)
2699 misfit_task = capacity_orig_of(cpumask_first(rtg_target)) >
2700 cpu_orig_cap;
2701 else
2702 misfit_task = !rt_task_fits_capacity(p, cpu);
2703 #else
2704 misfit_task = !rt_task_fits_capacity(p, cpu);
2705 #endif
2706
2707 if (misfit_task) {
2708 raw_spin_lock(&rq->lock);
2709 if (!rq->active_balance && !rq->rt_active_balance) {
2710 rq->rt_active_balance = 1;
2711 rq->rt_push_task = p;
2712 get_task_struct(p);
2713 need_active_lb = true;
2714 }
2715 raw_spin_unlock(&rq->lock);
2716
2717 if (need_active_lb)
2718 stop_one_cpu_nowait(task_cpu(p),
2719 rt_active_load_balance_cpu_stop,
2720 rq, &rq->rt_active_balance_work);
2721 }
2722 }
2723 #endif
2724
2725 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2726 {
2727 /*
2728 * Time slice is 0 for SCHED_FIFO tasks
2729 */
2730 if (task->policy == SCHED_RR)
2731 return sched_rr_timeslice;
2732 else
2733 return 0;
2734 }
2735
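/*
 * Note: like the other scheduling classes, rt_sched_class is placed in a
 * dedicated linker section (__rt_sched_class below) so the core scheduler
 * can walk the class descriptors by address in priority order rather than
 * through a linked list.
 */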
2736 const struct sched_class rt_sched_class
2737 __section("__rt_sched_class") = {
2738 .enqueue_task = enqueue_task_rt,
2739 .dequeue_task = dequeue_task_rt,
2740 .yield_task = yield_task_rt,
2741
2742 .check_preempt_curr = check_preempt_curr_rt,
2743
2744 .pick_next_task = pick_next_task_rt,
2745 .put_prev_task = put_prev_task_rt,
2746 .set_next_task = set_next_task_rt,
2747
2748 #ifdef CONFIG_SMP
2749 .balance = balance_rt,
2750 .select_task_rq = select_task_rq_rt,
2751 .set_cpus_allowed = set_cpus_allowed_common,
2752 .rq_online = rq_online_rt,
2753 .rq_offline = rq_offline_rt,
2754 .task_woken = task_woken_rt,
2755 .switched_from = switched_from_rt,
2756 #endif
2757
2758 .task_tick = task_tick_rt,
2759
2760 .get_rr_interval = get_rr_interval_rt,
2761
2762 .prio_changed = prio_changed_rt,
2763 .switched_to = switched_to_rt,
2764
2765 .update_curr = update_curr_rt,
2766
2767 #ifdef CONFIG_UCLAMP_TASK
2768 .uclamp_enabled = 1,
2769 #endif
2770 #ifdef CONFIG_SCHED_WALT
2771 .fixup_walt_sched_stats = fixup_walt_sched_stats_common,
2772 #endif
2773 #ifdef CONFIG_SCHED_RT_ACTIVE_LB
2774 .check_for_migration = check_for_migration_rt,
2775 #endif
2776 };
2777
2778 #ifdef CONFIG_RT_GROUP_SCHED
2779 /*
2780 * Ensure that the real time constraints are schedulable.
2781 */
2782 static DEFINE_MUTEX(rt_constraints_mutex);
2783
2784 static inline int tg_has_rt_tasks(struct task_group *tg)
2785 {
2786 struct task_struct *task;
2787 struct css_task_iter it;
2788 int ret = 0;
2789
2790 /*
2791 * Autogroups do not have RT tasks; see autogroup_create().
2792 */
2793 if (task_group_is_autogroup(tg))
2794 return 0;
2795
2796 css_task_iter_start(&tg->css, 0, &it);
2797 while (!ret && (task = css_task_iter_next(&it)))
2798 ret |= rt_task(task);
2799 css_task_iter_end(&it);
2800
2801 return ret;
2802 }
2803
2804 struct rt_schedulable_data {
2805 struct task_group *tg;
2806 u64 rt_period;
2807 u64 rt_runtime;
2808 };
2809
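/*
 * Worked example (assuming to_ratio() scales runtime/period by 2^BW_SHIFT
 * with BW_SHIFT == 20): with the usual global defaults of a 1s period and
 * 0.95s runtime the allowed ratio is about 0.95 << 20 ~= 996147. A child
 * group asking for 600ms per 1s contributes about 629145, so two such
 * siblings under a parent at the default ratio would make the children's
 * sum exceed the parent's own ratio and the request is rejected with
 * -EINVAL.
 */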
2810 static int tg_rt_schedulable(struct task_group *tg, void *data)
2811 {
2812 struct rt_schedulable_data *d = data;
2813 struct task_group *child;
2814 unsigned long total, sum = 0;
2815 u64 period, runtime;
2816
2817 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2818 runtime = tg->rt_bandwidth.rt_runtime;
2819
2820 if (tg == d->tg) {
2821 period = d->rt_period;
2822 runtime = d->rt_runtime;
2823 }
2824
2825 /*
2826 * Cannot have more runtime than the period.
2827 */
2828 if (runtime > period && runtime != RUNTIME_INF)
2829 return -EINVAL;
2830
2831 /*
2832 * Ensure we don't starve existing RT tasks if runtime turns zero.
2833 */
2834 if (rt_bandwidth_enabled() && !runtime &&
2835 tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
2836 return -EBUSY;
2837
2838 total = to_ratio(period, runtime);
2839
2840 /*
2841 * Nobody can have more than the global setting allows.
2842 */
2843 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
2844 return -EINVAL;
2845
2846 /*
2847 * The sum of our children's runtime should not exceed our own.
2848 */
2849 list_for_each_entry_rcu(child, &tg->children, siblings) {
2850 period = ktime_to_ns(child->rt_bandwidth.rt_period);
2851 runtime = child->rt_bandwidth.rt_runtime;
2852
2853 if (child == d->tg) {
2854 period = d->rt_period;
2855 runtime = d->rt_runtime;
2856 }
2857
2858 sum += to_ratio(period, runtime);
2859 }
2860
2861 if (sum > total)
2862 return -EINVAL;
2863
2864 return 0;
2865 }
2866
2867 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2868 {
2869 int ret;
2870
2871 struct rt_schedulable_data data = {
2872 .tg = tg,
2873 .rt_period = period,
2874 .rt_runtime = runtime,
2875 };
2876
2877 rcu_read_lock();
2878 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
2879 rcu_read_unlock();
2880
2881 return ret;
2882 }
2883
2884 static int tg_set_rt_bandwidth(struct task_group *tg,
2885 u64 rt_period, u64 rt_runtime)
2886 {
2887 int i, err = 0;
2888
2889 /*
2890 * Disallowing the root group RT runtime is BAD, it would disallow the
2891 * kernel creating (and/or operating) RT threads.
2892 */
2893 if (tg == &root_task_group && rt_runtime == 0)
2894 return -EINVAL;
2895
2896 /* No period doesn't make any sense. */
2897 if (rt_period == 0)
2898 return -EINVAL;
2899
2900 /*
2901 * Bound quota to defend quota against overflow during bandwidth shift.
2902 */
2903 if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
2904 return -EINVAL;
2905
2906 mutex_lock(&rt_constraints_mutex);
2907 err = __rt_schedulable(tg, rt_period, rt_runtime);
2908 if (err)
2909 goto unlock;
2910
2911 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2912 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
2913 tg->rt_bandwidth.rt_runtime = rt_runtime;
2914
2915 for_each_possible_cpu(i) {
2916 struct rt_rq *rt_rq = tg->rt_rq[i];
2917
2918 raw_spin_lock(&rt_rq->rt_runtime_lock);
2919 rt_rq->rt_runtime = rt_runtime;
2920 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2921 }
2922 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2923 unlock:
2924 mutex_unlock(&rt_constraints_mutex);
2925
2926 return err;
2927 }
2928
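/*
 * This is what the cgroup cpu controller's cpu.rt_runtime_us file ends up
 * calling (assuming CONFIG_RT_GROUP_SCHED and the v1 cpu controller).
 * For example, writing 500000 caps the group's RT tasks at 0.5s of runtime
 * per period, while writing -1 maps to RUNTIME_INF (no limit).
 */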
2929 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
2930 {
2931 u64 rt_runtime, rt_period;
2932
2933 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2934 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
2935 if (rt_runtime_us < 0)
2936 rt_runtime = RUNTIME_INF;
2937 else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
2938 return -EINVAL;
2939
2940 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2941 }
2942
2943 long sched_group_rt_runtime(struct task_group *tg)
2944 {
2945 u64 rt_runtime_us;
2946
2947 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
2948 return -1;
2949
2950 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
2951 do_div(rt_runtime_us, NSEC_PER_USEC);
2952 return rt_runtime_us;
2953 }
2954
2955 int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
2956 {
2957 u64 rt_runtime, rt_period;
2958
2959 if (rt_period_us > U64_MAX / NSEC_PER_USEC)
2960 return -EINVAL;
2961
2962 rt_period = rt_period_us * NSEC_PER_USEC;
2963 rt_runtime = tg->rt_bandwidth.rt_runtime;
2964
2965 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2966 }
2967
2968 long sched_group_rt_period(struct task_group *tg)
2969 {
2970 u64 rt_period_us;
2971
2972 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
2973 do_div(rt_period_us, NSEC_PER_USEC);
2974 return rt_period_us;
2975 }
2976
2977 static int sched_rt_global_constraints(void)
2978 {
2979 int ret = 0;
2980
2981 mutex_lock(&rt_constraints_mutex);
2982 ret = __rt_schedulable(NULL, 0, 0);
2983 mutex_unlock(&rt_constraints_mutex);
2984
2985 return ret;
2986 }
2987
2988 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
2989 {
2990 /* Don't accept realtime tasks when there is no way for them to run */
2991 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
2992 return 0;
2993
2994 return 1;
2995 }
2996
2997 #else /* !CONFIG_RT_GROUP_SCHED */
2998 static int sched_rt_global_constraints(void)
2999 {
3000 unsigned long flags;
3001 int i;
3002
3003 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
3004 for_each_possible_cpu(i) {
3005 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
3006
3007 raw_spin_lock(&rt_rq->rt_runtime_lock);
3008 rt_rq->rt_runtime = global_rt_runtime();
3009 raw_spin_unlock(&rt_rq->rt_runtime_lock);
3010 }
3011 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
3012
3013 return 0;
3014 }
3015 #endif /* CONFIG_RT_GROUP_SCHED */
3016
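/*
 * Example of the validation below (assuming the usual defaults of
 * sched_rt_period_us = 1000000 and sched_rt_runtime_us = 950000): writing
 * a runtime larger than the period, a period <= 0, or a runtime whose
 * nanosecond value exceeds max_rt_runtime makes the sysctl write fail with
 * -EINVAL, and sched_rt_handler() then restores the old values.
 */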
3017 static int sched_rt_global_validate(void)
3018 {
3019 if (sysctl_sched_rt_period <= 0)
3020 return -EINVAL;
3021
3022 if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
3023 ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
3024 ((u64)sysctl_sched_rt_runtime *
3025 NSEC_PER_USEC > max_rt_runtime)))
3026 return -EINVAL;
3027
3028 return 0;
3029 }
3030
3031 static void sched_rt_do_global(void)
3032 {
3033 unsigned long flags;
3034
3035 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
3036 def_rt_bandwidth.rt_runtime = global_rt_runtime();
3037 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
3038 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
3039 }
3040
3041 int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
3042 size_t *lenp, loff_t *ppos)
3043 {
3044 int old_period, old_runtime;
3045 static DEFINE_MUTEX(mutex);
3046 int ret;
3047
3048 mutex_lock(&mutex);
3049 old_period = sysctl_sched_rt_period;
3050 old_runtime = sysctl_sched_rt_runtime;
3051
3052 ret = proc_dointvec(table, write, buffer, lenp, ppos);
3053
3054 if (!ret && write) {
3055 ret = sched_rt_global_validate();
3056 if (ret)
3057 goto undo;
3058
3059 ret = sched_dl_global_validate();
3060 if (ret)
3061 goto undo;
3062
3063 ret = sched_rt_global_constraints();
3064 if (ret)
3065 goto undo;
3066
3067 sched_rt_do_global();
3068 sched_dl_do_global();
3069 }
3070 if (0) {
3071 undo:
3072 sysctl_sched_rt_period = old_period;
3073 sysctl_sched_rt_runtime = old_runtime;
3074 }
3075 mutex_unlock(&mutex);
3076
3077 return ret;
3078 }
3079
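/*
 * sched_rr_handler() backs the sched_rr_timeslice_ms sysctl (assuming the
 * sysctl table wires it up as in mainline): the value is kept internally
 * in jiffies via msecs_to_jiffies(), and writing zero or a negative value
 * restores the RR_TIMESLICE default.
 */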
3080 int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
3081 size_t *lenp, loff_t *ppos)
3082 {
3083 int ret;
3084 static DEFINE_MUTEX(mutex);
3085
3086 mutex_lock(&mutex);
3087 ret = proc_dointvec(table, write, buffer, lenp, ppos);
3088 /*
3089 * Make sure that internally we keep jiffies.
3090 * Also, writing zero resets the timeslice to default:
3091 */
3092 if (!ret && write) {
3093 sched_rr_timeslice =
3094 sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
3095 msecs_to_jiffies(sysctl_sched_rr_timeslice);
3096 }
3097 mutex_unlock(&mutex);
3098
3099 return ret;
3100 }
3101
3102 #ifdef CONFIG_SCHED_DEBUG
3103 void print_rt_stats(struct seq_file *m, int cpu)
3104 {
3105 rt_rq_iter_t iter;
3106 struct rt_rq *rt_rq;
3107
3108 rcu_read_lock();
3109 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
3110 print_rt_rq(m, cpu, rt_rq);
3111 rcu_read_unlock();
3112 }
3113 #endif /* CONFIG_SCHED_DEBUG */
3114