1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Deadline Scheduling Class (SCHED_DEADLINE)
4 *
5 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
6 *
7 * Tasks that periodically execute their instances for less than their
8 * runtime won't miss any of their deadlines.
9 * Tasks that are not periodic or sporadic or that try to execute more
10 * than their reserved bandwidth will be slowed down (and may potentially
11 * miss some of their deadlines), and won't affect any other task.
12 *
13 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
14 * Juri Lelli <juri.lelli@gmail.com>,
15 * Michael Trimarchi <michael@amarulasolutions.com>,
16 * Fabio Checconi <fchecconi@gmail.com>
17 */
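/*
 * Illustrative only, not part of the scheduler: a minimal userspace sketch of
 * how a thread would request this policy, assuming a libc without a
 * sched_setattr() wrapper so the raw syscall is used. The three time
 * parameters are in nanoseconds; here the task asks for 10ms of runtime
 * every 100ms.
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,
 *		.sched_deadline	= 100 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *
 *	if (syscall(__NR_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 */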
18
19 #include <linux/cpuset.h>
20
21 /*
22 * Default limits for DL period; on the top end we guard against small util
23 * tasks still getting ridiculously long effective runtimes; on the bottom end we
24 * guard against timer DoS.
25 */
26 static unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
27 static unsigned int sysctl_sched_dl_period_min = 100; /* 100 us */
28 #ifdef CONFIG_SYSCTL
29 static struct ctl_table sched_dl_sysctls[] = {
30 {
31 .procname = "sched_deadline_period_max_us",
32 .data = &sysctl_sched_dl_period_max,
33 .maxlen = sizeof(unsigned int),
34 .mode = 0644,
35 .proc_handler = proc_douintvec_minmax,
36 .extra1 = (void *)&sysctl_sched_dl_period_min,
37 },
38 {
39 .procname = "sched_deadline_period_min_us",
40 .data = &sysctl_sched_dl_period_min,
41 .maxlen = sizeof(unsigned int),
42 .mode = 0644,
43 .proc_handler = proc_douintvec_minmax,
44 .extra2 = (void *)&sysctl_sched_dl_period_max,
45 },
46 {}
47 };
48
49 static int __init sched_dl_sysctl_init(void)
50 {
51 register_sysctl_init("kernel", sched_dl_sysctls);
52 return 0;
53 }
54 late_initcall(sched_dl_sysctl_init);
55 #endif
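/*
 * A quick usage sketch (assuming the usual procfs mount point): the two
 * limits above are exposed under /proc/sys/kernel/ and bound, in
 * microseconds, the period accepted from sched_setattr():
 *
 *	# cat /proc/sys/kernel/sched_deadline_period_min_us
 *	100
 *	# echo 1000 > /proc/sys/kernel/sched_deadline_period_min_us
 */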
56
57 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
58 {
59 return container_of(dl_se, struct task_struct, dl);
60 }
61
62 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
63 {
64 return container_of(dl_rq, struct rq, dl);
65 }
66
67 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
68 {
69 struct task_struct *p = dl_task_of(dl_se);
70 struct rq *rq = task_rq(p);
71
72 return &rq->dl;
73 }
74
75 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
76 {
77 return !RB_EMPTY_NODE(&dl_se->rb_node);
78 }
79
80 #ifdef CONFIG_RT_MUTEXES
81 static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
82 {
83 return dl_se->pi_se;
84 }
85
86 static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
87 {
88 return pi_of(dl_se) != dl_se;
89 }
90 #else
91 static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
92 {
93 return dl_se;
94 }
95
96 static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
97 {
98 return false;
99 }
100 #endif
101
102 #ifdef CONFIG_SMP
103 static inline struct dl_bw *dl_bw_of(int i)
104 {
105 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
106 "sched RCU must be held");
107 return &cpu_rq(i)->rd->dl_bw;
108 }
109
110 static inline int dl_bw_cpus(int i)
111 {
112 struct root_domain *rd = cpu_rq(i)->rd;
113 int cpus;
114
115 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
116 "sched RCU must be held");
117
118 if (cpumask_subset(rd->span, cpu_active_mask))
119 return cpumask_weight(rd->span);
120
121 cpus = 0;
122
123 for_each_cpu_and(i, rd->span, cpu_active_mask)
124 cpus++;
125
126 return cpus;
127 }
128
129 static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
130 {
131 unsigned long cap = 0;
132 int i;
133
134 for_each_cpu_and(i, mask, cpu_active_mask)
135 cap += capacity_orig_of(i);
136
137 return cap;
138 }
139
140 /*
141 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
142 * of the CPU the task is running on rather than the rd's \Sum CPU capacity.
143 */
144 static inline unsigned long dl_bw_capacity(int i)
145 {
146 if (!sched_asym_cpucap_active() &&
147 capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
148 return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
149 } else {
150 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
151 "sched RCU must be held");
152
153 return __dl_bw_capacity(cpu_rq(i)->rd->span);
154 }
155 }
156
157 static inline bool dl_bw_visited(int cpu, u64 gen)
158 {
159 struct root_domain *rd = cpu_rq(cpu)->rd;
160
161 if (rd->visit_gen == gen)
162 return true;
163
164 rd->visit_gen = gen;
165 return false;
166 }
167
168 static inline
169 void __dl_update(struct dl_bw *dl_b, s64 bw)
170 {
171 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
172 int i;
173
174 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
175 "sched RCU must be held");
176 for_each_cpu_and(i, rd->span, cpu_active_mask) {
177 struct rq *rq = cpu_rq(i);
178
179 rq->dl.extra_bw += bw;
180 }
181 }
182 #else
183 static inline struct dl_bw *dl_bw_of(int i)
184 {
185 return &cpu_rq(i)->dl.dl_bw;
186 }
187
188 static inline int dl_bw_cpus(int i)
189 {
190 return 1;
191 }
192
193 static inline unsigned long dl_bw_capacity(int i)
194 {
195 return SCHED_CAPACITY_SCALE;
196 }
197
198 static inline bool dl_bw_visited(int cpu, u64 gen)
199 {
200 return false;
201 }
202
203 static inline
204 void __dl_update(struct dl_bw *dl_b, s64 bw)
205 {
206 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
207
208 dl->extra_bw += bw;
209 }
210 #endif
211
212 static inline
213 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
214 {
215 dl_b->total_bw -= tsk_bw;
216 __dl_update(dl_b, (s32)tsk_bw / cpus);
217 }
218
219 static inline
220 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
221 {
222 dl_b->total_bw += tsk_bw;
223 __dl_update(dl_b, -((s32)tsk_bw / cpus));
224 }
225
226 static inline bool
227 __dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
228 {
229 return dl_b->bw != -1 &&
230 cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
231 }
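/*
 * A worked example of the check above, with assumed (not mandated) numbers:
 * with the default 95% global limit, dl_b->bw is ~0.95 in BW_SHIFT fixed
 * point, and on a root domain of 4 full-capacity CPUs cap is
 * 4 * SCHED_CAPACITY_SCALE, so cap_scale(dl_b->bw, cap) is ~3.8 "CPUs worth"
 * of bandwidth. If already-admitted tasks sum to total_bw ~= 3.5 and a new
 * task requests new_bw ~= 0.4 (old_bw == 0), then 3.5 - 0 + 0.4 = 3.9 > 3.8
 * and the function returns true: admission control rejects the request.
 */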
232
233 static inline
234 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
235 {
236 u64 old = dl_rq->running_bw;
237
238 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
239 dl_rq->running_bw += dl_bw;
240 SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
241 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
242 /* kick cpufreq (see the comment in kernel/sched/sched.h). */
243 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
244 }
245
246 static inline
247 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
248 {
249 u64 old = dl_rq->running_bw;
250
251 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
252 dl_rq->running_bw -= dl_bw;
253 SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
254 if (dl_rq->running_bw > old)
255 dl_rq->running_bw = 0;
256 /* kick cpufreq (see the comment in kernel/sched/sched.h). */
257 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
258 }
259
260 static inline
261 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
262 {
263 u64 old = dl_rq->this_bw;
264
265 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
266 dl_rq->this_bw += dl_bw;
267 SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
268 }
269
270 static inline
271 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
272 {
273 u64 old = dl_rq->this_bw;
274
275 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
276 dl_rq->this_bw -= dl_bw;
277 SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
278 if (dl_rq->this_bw > old)
279 dl_rq->this_bw = 0;
280 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
281 }
282
283 static inline
284 void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
285 {
286 if (!dl_entity_is_special(dl_se))
287 __add_rq_bw(dl_se->dl_bw, dl_rq);
288 }
289
290 static inline
291 void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
292 {
293 if (!dl_entity_is_special(dl_se))
294 __sub_rq_bw(dl_se->dl_bw, dl_rq);
295 }
296
297 static inline
298 void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
299 {
300 if (!dl_entity_is_special(dl_se))
301 __add_running_bw(dl_se->dl_bw, dl_rq);
302 }
303
304 static inline
305 void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
306 {
307 if (!dl_entity_is_special(dl_se))
308 __sub_running_bw(dl_se->dl_bw, dl_rq);
309 }
310
311 static void dl_change_utilization(struct task_struct *p, u64 new_bw)
312 {
313 struct rq *rq;
314
315 WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
316
317 if (task_on_rq_queued(p))
318 return;
319
320 rq = task_rq(p);
321 if (p->dl.dl_non_contending) {
322 sub_running_bw(&p->dl, &rq->dl);
323 p->dl.dl_non_contending = 0;
324 /*
325 * If the timer handler is currently running and the
326 * timer cannot be canceled, inactive_task_timer()
327 * will see that dl_non_contending is not set, and
328 * will not touch the rq's active utilization,
329 * so we are still safe.
330 */
331 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
332 put_task_struct(p);
333 }
334 __sub_rq_bw(p->dl.dl_bw, &rq->dl);
335 __add_rq_bw(new_bw, &rq->dl);
336 }
337
338 /*
339 * The utilization of a task cannot be immediately removed from
340 * the rq active utilization (running_bw) when the task blocks.
341 * Instead, we have to wait for the so called "0-lag time".
342 *
343 * If a task blocks before the "0-lag time", a timer (the inactive
344 * timer) is armed, and running_bw is decreased when the timer
345 * fires.
346 *
347 * If the task wakes up again before the inactive timer fires,
348 * the timer is canceled, whereas if the task wakes up after the
349 * inactive timer fired (and running_bw has been decreased) the
350 * task's utilization has to be added to running_bw again.
351 * A flag in the deadline scheduling entity (dl_non_contending)
352 * is used to avoid race conditions between the inactive timer handler
353 * and task wakeups.
354 *
355 * The following diagram shows how running_bw is updated. A task is
356 * "ACTIVE" when its utilization contributes to running_bw; an
357 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
358 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
359 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
360 * time already passed, which does not contribute to running_bw anymore.
361 * +------------------+
362 * wakeup | ACTIVE |
363 * +------------------>+ contending |
364 * | add_running_bw | |
365 * | +----+------+------+
366 * | | ^
367 * | dequeue | |
368 * +--------+-------+ | |
369 * | | t >= 0-lag | | wakeup
370 * | INACTIVE |<---------------+ |
371 * | | sub_running_bw | |
372 * +--------+-------+ | |
373 * ^ | |
374 * | t < 0-lag | |
375 * | | |
376 * | V |
377 * | +----+------+------+
378 * | sub_running_bw | ACTIVE |
379 * +-------------------+ |
380 * inactive timer | non contending |
381 * fired +------------------+
382 *
383 * The task_non_contending() function is invoked when a task
384 * blocks, and checks if the 0-lag time already passed or
385 * not (in the first case, it directly updates running_bw;
386 * in the second case, it arms the inactive timer).
387 *
388 * The task_contending() function is invoked when a task wakes
389 * up, and checks if the task is still in the "ACTIVE non contending"
390 * state or not (in the second case, it updates running_bw).
391 */
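/*
 * A worked example of the 0-lag time used below (illustrative numbers only):
 * a task with dl_runtime = 10ms and dl_period = 100ms blocks with 2ms of
 * runtime left and its absolute deadline 30ms away. Its 0-lag time is
 * deadline - runtime * dl_period / dl_runtime = deadline - 20ms, i.e. 10ms
 * in the future, so the inactive timer is armed for that instant. Had 4ms
 * of runtime been left, the 0-lag time (deadline - 40ms) would already be
 * in the past and running_bw would be decreased immediately, with no timer.
 */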
392 static void task_non_contending(struct task_struct *p)
393 {
394 struct sched_dl_entity *dl_se = &p->dl;
395 struct hrtimer *timer = &dl_se->inactive_timer;
396 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
397 struct rq *rq = rq_of_dl_rq(dl_rq);
398 s64 zerolag_time;
399
400 /*
401 * If this is a non-deadline task that has been boosted,
402 * do nothing
403 */
404 if (dl_se->dl_runtime == 0)
405 return;
406
407 if (dl_entity_is_special(dl_se))
408 return;
409
410 WARN_ON(dl_se->dl_non_contending);
411
412 zerolag_time = dl_se->deadline -
413 div64_long((dl_se->runtime * dl_se->dl_period),
414 dl_se->dl_runtime);
415
416 /*
417 * Using relative times instead of the absolute "0-lag time"
418 * allows us to simplify the code
419 */
420 zerolag_time -= rq_clock(rq);
421
422 /*
423 * If the "0-lag time" already passed, decrease the active
424 * utilization now, instead of starting a timer
425 */
426 if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
427 if (dl_task(p))
428 sub_running_bw(dl_se, dl_rq);
429 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
430 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
431
432 if (READ_ONCE(p->__state) == TASK_DEAD)
433 sub_rq_bw(&p->dl, &rq->dl);
434 raw_spin_lock(&dl_b->lock);
435 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
436 raw_spin_unlock(&dl_b->lock);
437 __dl_clear_params(p);
438 }
439
440 return;
441 }
442
443 dl_se->dl_non_contending = 1;
444 get_task_struct(p);
445 hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
446 }
447
448 static void task_contending(struct sched_dl_entity *dl_se, int flags)
449 {
450 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
451
452 /*
453 * If this is a non-deadline task that has been boosted,
454 * do nothing
455 */
456 if (dl_se->dl_runtime == 0)
457 return;
458
459 if (flags & ENQUEUE_MIGRATED)
460 add_rq_bw(dl_se, dl_rq);
461
462 if (dl_se->dl_non_contending) {
463 dl_se->dl_non_contending = 0;
464 /*
465 * If the timer handler is currently running and the
466 * timer cannot be canceled, inactive_task_timer()
467 * will see that dl_non_contending is not set, and
468 * will not touch the rq's active utilization,
469 * so we are still safe.
470 */
471 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
472 put_task_struct(dl_task_of(dl_se));
473 } else {
474 /*
475 * Since "dl_non_contending" is not set, the
476 * task's utilization has already been removed from
477 * active utilization (either when the task blocked,
478 * or when the "inactive timer" fired).
479 * So, add it back.
480 */
481 add_running_bw(dl_se, dl_rq);
482 }
483 }
484
485 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
486 {
487 struct sched_dl_entity *dl_se = &p->dl;
488
489 return rb_first_cached(&dl_rq->root) == &dl_se->rb_node;
490 }
491
492 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
493
494 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
495 {
496 raw_spin_lock_init(&dl_b->dl_runtime_lock);
497 dl_b->dl_period = period;
498 dl_b->dl_runtime = runtime;
499 }
500
501 void init_dl_bw(struct dl_bw *dl_b)
502 {
503 raw_spin_lock_init(&dl_b->lock);
504 if (global_rt_runtime() == RUNTIME_INF)
505 dl_b->bw = -1;
506 else
507 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
508 dl_b->total_bw = 0;
509 }
510
511 void init_dl_rq(struct dl_rq *dl_rq)
512 {
513 dl_rq->root = RB_ROOT_CACHED;
514
515 #ifdef CONFIG_SMP
516 /* zero means no -deadline tasks */
517 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
518
519 dl_rq->dl_nr_migratory = 0;
520 dl_rq->overloaded = 0;
521 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
522 #else
523 init_dl_bw(&dl_rq->dl_bw);
524 #endif
525
526 dl_rq->running_bw = 0;
527 dl_rq->this_bw = 0;
528 init_dl_rq_bw_ratio(dl_rq);
529 }
530
531 #ifdef CONFIG_SMP
532
533 static inline int dl_overloaded(struct rq *rq)
534 {
535 return atomic_read(&rq->rd->dlo_count);
536 }
537
538 static inline void dl_set_overload(struct rq *rq)
539 {
540 if (!rq->online)
541 return;
542
543 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
544 /*
545 * Must be visible before the overload count is
546 * set (as in sched_rt.c).
547 *
548 * Matched by the barrier in pull_dl_task().
549 */
550 smp_wmb();
551 atomic_inc(&rq->rd->dlo_count);
552 }
553
554 static inline void dl_clear_overload(struct rq *rq)
555 {
556 if (!rq->online)
557 return;
558
559 atomic_dec(&rq->rd->dlo_count);
560 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
561 }
562
563 static void update_dl_migration(struct dl_rq *dl_rq)
564 {
565 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
566 if (!dl_rq->overloaded) {
567 dl_set_overload(rq_of_dl_rq(dl_rq));
568 dl_rq->overloaded = 1;
569 }
570 } else if (dl_rq->overloaded) {
571 dl_clear_overload(rq_of_dl_rq(dl_rq));
572 dl_rq->overloaded = 0;
573 }
574 }
575
576 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
577 {
578 struct task_struct *p = dl_task_of(dl_se);
579
580 if (p->nr_cpus_allowed > 1)
581 dl_rq->dl_nr_migratory++;
582
583 update_dl_migration(dl_rq);
584 }
585
586 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
587 {
588 struct task_struct *p = dl_task_of(dl_se);
589
590 if (p->nr_cpus_allowed > 1)
591 dl_rq->dl_nr_migratory--;
592
593 update_dl_migration(dl_rq);
594 }
595
596 #define __node_2_pdl(node) \
597 rb_entry((node), struct task_struct, pushable_dl_tasks)
598
599 static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
600 {
601 return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
602 }
603
604 /*
605 * The list of pushable -deadline tasks is not a plist, like in
606 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
607 */
608 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
609 {
610 struct rb_node *leftmost;
611
612 WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
613
614 leftmost = rb_add_cached(&p->pushable_dl_tasks,
615 &rq->dl.pushable_dl_tasks_root,
616 __pushable_less);
617 if (leftmost)
618 rq->dl.earliest_dl.next = p->dl.deadline;
619 }
620
621 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
622 {
623 struct dl_rq *dl_rq = &rq->dl;
624 struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
625 struct rb_node *leftmost;
626
627 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
628 return;
629
630 leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
631 if (leftmost)
632 dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
633
634 RB_CLEAR_NODE(&p->pushable_dl_tasks);
635 }
636
637 static inline int has_pushable_dl_tasks(struct rq *rq)
638 {
639 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
640 }
641
642 static int push_dl_task(struct rq *rq);
643
644 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
645 {
646 return rq->online && dl_task(prev);
647 }
648
649 static DEFINE_PER_CPU(struct balance_callback, dl_push_head);
650 static DEFINE_PER_CPU(struct balance_callback, dl_pull_head);
651
652 static void push_dl_tasks(struct rq *);
653 static void pull_dl_task(struct rq *);
654
655 static inline void deadline_queue_push_tasks(struct rq *rq)
656 {
657 if (!has_pushable_dl_tasks(rq))
658 return;
659
660 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
661 }
662
663 static inline void deadline_queue_pull_task(struct rq *rq)
664 {
665 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
666 }
667
668 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
669
670 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
671 {
672 struct rq *later_rq = NULL;
673 struct dl_bw *dl_b;
674
675 later_rq = find_lock_later_rq(p, rq);
676 if (!later_rq) {
677 int cpu;
678
679 /*
680 * If we cannot preempt any rq, fall back to pick any
681 * online CPU:
682 */
683 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
684 if (cpu >= nr_cpu_ids) {
685 /*
686 * Failed to find any suitable CPU.
687 * The task will never come back!
688 */
689 WARN_ON_ONCE(dl_bandwidth_enabled());
690
691 /*
692 * If admission control is disabled we
693 * try a little harder to let the task
694 * run.
695 */
696 cpu = cpumask_any(cpu_active_mask);
697 }
698 later_rq = cpu_rq(cpu);
699 double_lock_balance(rq, later_rq);
700 }
701
702 if (p->dl.dl_non_contending || p->dl.dl_throttled) {
703 /*
704 * Inactive timer is armed (or callback is running, but
705 * waiting for us to release rq locks). In any case, when it
706 * fires (or continues), it will see running_bw of this
707 * task migrated to later_rq (and correctly handle it).
708 */
709 sub_running_bw(&p->dl, &rq->dl);
710 sub_rq_bw(&p->dl, &rq->dl);
711
712 add_rq_bw(&p->dl, &later_rq->dl);
713 add_running_bw(&p->dl, &later_rq->dl);
714 } else {
715 sub_rq_bw(&p->dl, &rq->dl);
716 add_rq_bw(&p->dl, &later_rq->dl);
717 }
718
719 /*
720 * And we finally need to fixup root_domain(s) bandwidth accounting,
721 * since p is still hanging out in the old (now moved to default) root
722 * domain.
723 */
724 dl_b = &rq->rd->dl_bw;
725 raw_spin_lock(&dl_b->lock);
726 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
727 raw_spin_unlock(&dl_b->lock);
728
729 dl_b = &later_rq->rd->dl_bw;
730 raw_spin_lock(&dl_b->lock);
731 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
732 raw_spin_unlock(&dl_b->lock);
733
734 set_task_cpu(p, later_rq->cpu);
735 double_unlock_balance(later_rq, rq);
736
737 return later_rq;
738 }
739
740 #else
741
742 static inline
743 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
744 {
745 }
746
747 static inline
748 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
749 {
750 }
751
752 static inline
753 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
754 {
755 }
756
757 static inline
758 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
759 {
760 }
761
762 static inline void deadline_queue_push_tasks(struct rq *rq)
763 {
764 }
765
766 static inline void deadline_queue_pull_task(struct rq *rq)
767 {
768 }
769 #endif /* CONFIG_SMP */
770
771 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
772 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
773 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
774
775 static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
776 struct rq *rq)
777 {
778 /* for non-boosted task, pi_of(dl_se) == dl_se */
779 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
780 dl_se->runtime = pi_of(dl_se)->dl_runtime;
781 }
782
783 /*
784 * We are being explicitly informed that a new instance is starting,
785 * and this means that:
786 * - the absolute deadline of the entity has to be placed at
787 * current time + relative deadline;
788 * - the runtime of the entity has to be set to the maximum value.
789 *
790 * The ability to specify such an event is useful whenever a -deadline
791 * entity wants to (try to!) synchronize its behaviour with the scheduler's
792 * one, and to (try to!) reconcile itself with its own scheduling
793 * parameters.
794 */
795 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
796 {
797 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
798 struct rq *rq = rq_of_dl_rq(dl_rq);
799
800 WARN_ON(is_dl_boosted(dl_se));
801 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
802
803 /*
804 * We are racing with the deadline timer. So, do nothing because
805 * the deadline timer handler will take care of properly recharging
806 * the runtime and postponing the deadline
807 */
808 if (dl_se->dl_throttled)
809 return;
810
811 /*
812 * We use the regular wall clock time to set deadlines in the
813 * future; in fact, we must consider execution overheads (time
814 * spent on hardirq context, etc.).
815 */
816 replenish_dl_new_period(dl_se, rq);
817 }
818
819 /*
820 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
821 * possibility of an entity lasting more than what it declared, and thus
822 * exhausting its runtime.
823 *
824 * Here we are interested in making runtime overrun possible, but we do
825 * not want an entity which is misbehaving to affect the scheduling of all
826 * other entities.
827 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
828 * is used, in order to confine each entity within its own bandwidth.
829 *
830 * This function deals exactly with that, and ensures that when the runtime
831 * of an entity is replenished, its deadline is also postponed. That ensures
832 * the overrunning entity can't interfere with other entities in the system and
833 * can't make them miss their deadlines. Reasons why this kind of overrun
834 * could happen are, typically, an entity voluntarily trying to exceed its
835 * runtime, or one that underestimated it during sched_setattr().
836 */
837 static void replenish_dl_entity(struct sched_dl_entity *dl_se)
838 {
839 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
840 struct rq *rq = rq_of_dl_rq(dl_rq);
841
842 WARN_ON_ONCE(pi_of(dl_se)->dl_runtime <= 0);
843
844 /*
845 * This could be the case for a !-dl task that is boosted.
846 * Just go with full inherited parameters.
847 */
848 if (dl_se->dl_deadline == 0)
849 replenish_dl_new_period(dl_se, rq);
850
851 if (dl_se->dl_yielded && dl_se->runtime > 0)
852 dl_se->runtime = 0;
853
854 /*
855 * We keep moving the deadline away until we get some
856 * available runtime for the entity. This ensures correct
857 * handling of situations where the runtime overrun is
858 * arbitrarily large.
859 */
860 while (dl_se->runtime <= 0) {
861 dl_se->deadline += pi_of(dl_se)->dl_period;
862 dl_se->runtime += pi_of(dl_se)->dl_runtime;
863 }
864
865 /*
866 * At this point, the deadline really should be "in
867 * the future" with respect to rq->clock. If it's
868 * not, we are, for some reason, lagging too much!
869 * Anyway, after having warned userspace about that,
870 * we still try to keep things running by
871 * resetting the deadline and the budget of the
872 * entity.
873 */
874 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
875 printk_deferred_once("sched: DL replenish lagged too much\n");
876 replenish_dl_new_period(dl_se, rq);
877 }
878
879 if (dl_se->dl_yielded)
880 dl_se->dl_yielded = 0;
881 if (dl_se->dl_throttled)
882 dl_se->dl_throttled = 0;
883 }
884
885 /*
886 * Here we check if --at time t-- an entity (which is probably being
887 * [re]activated or, in general, enqueued) can use its remaining runtime
888 * and its current deadline _without_ exceeding the bandwidth it is
889 * assigned (function returns true if it can't). We are in fact applying
890 * one of the CBS rules: when a task wakes up, if the residual runtime
891 * over residual deadline fits within the allocated bandwidth, then we
892 * can keep the current (absolute) deadline and residual budget without
893 * disrupting the schedulability of the system. Otherwise, we should
894 * refill the runtime and set the deadline a period in the future,
895 * because keeping the current (absolute) deadline of the task would
896 * result in breaking guarantees promised to other tasks (refer to
897 * Documentation/scheduler/sched-deadline.rst for more information).
898 *
899 * This function returns true if:
900 *
901 * runtime / (deadline - t) > dl_runtime / dl_deadline ,
902 *
903 * IOW we can't recycle current parameters.
904 *
905 * Notice that the bandwidth check is done against the deadline. For
906 * tasks with deadline equal to period this is the same as using
907 * dl_period instead of dl_deadline in the equation above.
908 */
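/*
 * For instance (illustrative numbers): dl_runtime = 10ms and
 * dl_deadline = 100ms give a bandwidth of 0.1. If the task wakes up with
 * 5ms of residual runtime and 40ms left to its absolute deadline, then
 * 5 / 40 = 0.125 > 0.1 and this function returns true: the old parameters
 * cannot be reused. With only 3ms of residual runtime, 3 / 40 = 0.075 <= 0.1
 * and the current deadline and runtime are kept.
 */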
909 static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
910 {
911 u64 left, right;
912
913 /*
914 * left and right are the two sides of the equation above,
915 * after a bit of shuffling to use multiplications instead
916 * of divisions.
917 *
918 * Note that none of the time values involved in the two
919 * multiplications are absolute: dl_deadline and dl_runtime
920 * are the relative deadline and the maximum runtime of each
921 * instance, runtime is the runtime left for the last instance
922 * and (deadline - t), since t is rq->clock, is the time left
923 * to the (absolute) deadline. Even if overflowing the u64 type
924 * is very unlikely to occur in both cases, here we scale down
925 * as we want to avoid that risk at all. Scaling down by 10
926 * means that we reduce granularity to 1us. We are fine with it,
927 * since this is only a true/false check and, anyway, thinking
928 * of anything below microseconds resolution is actually fiction
929 * (but still we want to give the user that illusion >;).
930 */
931 left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
932 right = ((dl_se->deadline - t) >> DL_SCALE) *
933 (pi_of(dl_se)->dl_runtime >> DL_SCALE);
934
935 return dl_time_before(right, left);
936 }
937
938 /*
939 * Revised wakeup rule [1]: For self-suspending tasks, rather than
940 * re-initializing the task's runtime and deadline, the revised wakeup
941 * rule adjusts the task's runtime to avoid the task overrunning its
942 * density.
943 *
944 * Reasoning: a task may overrun the density if:
945 * runtime / (deadline - t) > dl_runtime / dl_deadline
946 *
947 * Therefore, runtime can be adjusted to:
948 * runtime = (dl_runtime / dl_deadline) * (deadline - t)
949 *
950 * This way, the runtime will be equal to the maximum the task can
951 * use without breaking the density rule.
952 *
953 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
954 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
955 */
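/*
 * As an illustration (made-up numbers): a task with dl_runtime = 10ms and
 * dl_deadline = 100ms that wakes up 40ms before its old absolute deadline
 * gets its runtime clamped to (10 / 100) * 40ms = 4ms, so that residual
 * runtime over residual deadline never exceeds the declared density.
 */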
956 static void
957 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
958 {
959 u64 laxity = dl_se->deadline - rq_clock(rq);
960
961 /*
962 * If the task has deadline < period, and the deadline is in the past,
963 * it should already be throttled before this check.
964 *
965 * See update_dl_entity() comments for further details.
966 */
967 WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
968
969 dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
970 }
971
972 /*
973 * Regarding the deadline, a task with implicit deadline has a relative
974 * deadline == relative period. A task with constrained deadline has a
975 * relative deadline <= relative period.
976 *
977 * We support constrained deadline tasks. However, there are some restrictions
978 * applied only for tasks which do not have an implicit deadline. See
979 * update_dl_entity() to know more about such restrictions.
980 *
981 * The dl_is_implicit() returns true if the task has an implicit deadline.
982 */
983 static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
984 {
985 return dl_se->dl_deadline == dl_se->dl_period;
986 }
987
988 /*
989 * When a deadline entity is placed in the runqueue, its runtime and deadline
990 * might need to be updated. This is done by a CBS wake up rule. There are two
991 * different rules: 1) the original CBS; and 2) the Revisited CBS.
992 *
993 * When the task is starting a new period, the Original CBS is used. In this
994 * case, the runtime is replenished and a new absolute deadline is set.
995 *
996 * When a task is queued before the beginning of the next period, using the
997 * remaining runtime and deadline could make the entity overflow; see
998 * dl_entity_overflow() for more about runtime overflow. When such a case
999 * is detected, the runtime and deadline need to be updated.
1000 *
1001 * If the task has an implicit deadline, i.e., deadline == period, the Original
1002 * CBS is applied. The runtime is replenished and a new absolute deadline is
1003 * set, as in the previous cases.
1004 *
1005 * However, the Original CBS does not work properly for tasks with
1006 * deadline < period, which are said to have a constrained deadline. By
1007 * applying the Original CBS, a constrained deadline task would be able to run
1008 * runtime/deadline in a period. With deadline < period, the task would
1009 * overrun the runtime/period allowed bandwidth, breaking the admission test.
1010 *
1011 * In order to prevent this misbehavior, the Revisited CBS is used for
1012 * constrained deadline tasks when a runtime overflow is detected. In the
1013 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
1014 * the remaining runtime of the task is reduced to avoid runtime overflow.
1015 * Please refer to the comments of the update_dl_revised_wakeup() function to
1016 * find out more about the Revised CBS rule.
1017 */
1018 static void update_dl_entity(struct sched_dl_entity *dl_se)
1019 {
1020 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1021 struct rq *rq = rq_of_dl_rq(dl_rq);
1022
1023 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
1024 dl_entity_overflow(dl_se, rq_clock(rq))) {
1025
1026 if (unlikely(!dl_is_implicit(dl_se) &&
1027 !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1028 !is_dl_boosted(dl_se))) {
1029 update_dl_revised_wakeup(dl_se, rq);
1030 return;
1031 }
1032
1033 replenish_dl_new_period(dl_se, rq);
1034 }
1035 }
1036
1037 static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
1038 {
1039 return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
1040 }
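/*
 * Example with assumed numbers: a constrained task with dl_deadline = 20ms
 * and dl_period = 100ms whose current absolute deadline sits at t = 120ms
 * has its next period starting at 120 - 20 + 100 = 200ms; that is the
 * instant the replenishment timer below aims for when the task is throttled.
 */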
1041
1042 /*
1043 * If the entity depleted all its runtime, and if we want it to sleep
1044 * while waiting for some new execution time to become available, we
1045 * set the bandwidth replenishment timer to the replenishment instant
1046 * and try to activate it.
1047 *
1048 * Notice that it is important for the caller to know if the timer
1049 * actually started or not (i.e., the replenishment instant is in
1050 * the future or in the past).
1051 */
1052 static int start_dl_timer(struct task_struct *p)
1053 {
1054 struct sched_dl_entity *dl_se = &p->dl;
1055 struct hrtimer *timer = &dl_se->dl_timer;
1056 struct rq *rq = task_rq(p);
1057 ktime_t now, act;
1058 s64 delta;
1059
1060 lockdep_assert_rq_held(rq);
1061
1062 /*
1063 * We want the timer to fire at the deadline, but considering
1064 * that it is actually coming from rq->clock and not from
1065 * hrtimer's time base reading.
1066 */
1067 act = ns_to_ktime(dl_next_period(dl_se));
1068 now = hrtimer_cb_get_time(timer);
1069 delta = ktime_to_ns(now) - rq_clock(rq);
1070 act = ktime_add_ns(act, delta);
1071
1072 /*
1073 * If the expiry time already passed, e.g., because the value
1074 * chosen as the deadline is too small, don't even try to
1075 * start the timer in the past!
1076 */
1077 if (ktime_us_delta(act, now) < 0)
1078 return 0;
1079
1080 /*
1081 * !enqueued will guarantee another callback; even if one is already in
1082 * progress. This ensures a balanced {get,put}_task_struct().
1083 *
1084 * The race against __run_timer() clearing the enqueued state is
1085 * harmless because we're holding task_rq()->lock, therefore the timer
1086 * expiring after we've done the check will wait on its task_rq_lock()
1087 * and observe our state.
1088 */
1089 if (!hrtimer_is_queued(timer)) {
1090 get_task_struct(p);
1091 hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
1092 }
1093
1094 return 1;
1095 }
1096
1097 /*
1098 * This is the bandwidth enforcement timer callback. If here, we know
1099 * a task is not on its dl_rq, since the fact that the timer was running
1100 * means the task is throttled and needs a runtime replenishment.
1101 *
1102 * However, what we actually do depends on whether the task is active
1103 * (it is on its rq) or has been removed from there by a call to
1104 * dequeue_task_dl(). In the former case we must issue the runtime
1105 * replenishment and add the task back to the dl_rq; in the latter, we just
1106 * do nothing but clearing dl_throttled, so that runtime and deadline
1107 * updating (and the queueing back to dl_rq) will be done by the
1108 * next call to enqueue_task_dl().
1109 */
1110 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1111 {
1112 struct sched_dl_entity *dl_se = container_of(timer,
1113 struct sched_dl_entity,
1114 dl_timer);
1115 struct task_struct *p = dl_task_of(dl_se);
1116 struct rq_flags rf;
1117 struct rq *rq;
1118
1119 rq = task_rq_lock(p, &rf);
1120
1121 /*
1122 * The task might have changed its scheduling policy to something
1123 * different than SCHED_DEADLINE (through switched_from_dl()).
1124 */
1125 if (!dl_task(p))
1126 goto unlock;
1127
1128 /*
1129 * The task might have been boosted by someone else and might be in the
1130 * boosting/deboosting path; it's not throttled.
1131 */
1132 if (is_dl_boosted(dl_se))
1133 goto unlock;
1134
1135 /*
1136 * Spurious timer due to start_dl_timer() race; or we already received
1137 * a replenishment from rt_mutex_setprio().
1138 */
1139 if (!dl_se->dl_throttled)
1140 goto unlock;
1141
1142 sched_clock_tick();
1143 update_rq_clock(rq);
1144
1145 /*
1146 * If the throttle happened during sched-out; like:
1147 *
1148 * schedule()
1149 * deactivate_task()
1150 * dequeue_task_dl()
1151 * update_curr_dl()
1152 * start_dl_timer()
1153 * __dequeue_task_dl()
1154 * prev->on_rq = 0;
1155 *
1156 * We can be both throttled and !queued. Replenish the counter
1157 * but do not enqueue -- wait for our wakeup to do that.
1158 */
1159 if (!task_on_rq_queued(p)) {
1160 replenish_dl_entity(dl_se);
1161 goto unlock;
1162 }
1163
1164 #ifdef CONFIG_SMP
1165 if (unlikely(!rq->online)) {
1166 /*
1167 * If the runqueue is no longer available, migrate the
1168 * task elsewhere. This necessarily changes rq.
1169 */
1170 lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
1171 rq = dl_task_offline_migration(rq, p);
1172 rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
1173 update_rq_clock(rq);
1174
1175 /*
1176 * Now that the task has been migrated to the new RQ and we
1177 * have that locked, proceed as normal and enqueue the task
1178 * there.
1179 */
1180 }
1181 #endif
1182
1183 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1184 if (dl_task(rq->curr))
1185 check_preempt_curr_dl(rq, p, 0);
1186 else
1187 resched_curr(rq);
1188
1189 #ifdef CONFIG_SMP
1190 /*
1191 * Queueing this task back might have overloaded rq, check if we need
1192 * to kick someone away.
1193 */
1194 if (has_pushable_dl_tasks(rq)) {
1195 /*
1196 * Nothing relies on rq->lock after this, so it's safe to drop
1197 * rq->lock.
1198 */
1199 rq_unpin_lock(rq, &rf);
1200 push_dl_task(rq);
1201 rq_repin_lock(rq, &rf);
1202 }
1203 #endif
1204
1205 unlock:
1206 task_rq_unlock(rq, p, &rf);
1207
1208 /*
1209 * This can free the task_struct, including this hrtimer, do not touch
1210 * anything related to that after this.
1211 */
1212 put_task_struct(p);
1213
1214 return HRTIMER_NORESTART;
1215 }
1216
1217 void init_dl_task_timer(struct sched_dl_entity *dl_se)
1218 {
1219 struct hrtimer *timer = &dl_se->dl_timer;
1220
1221 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1222 timer->function = dl_task_timer;
1223 }
1224
1225 /*
1226 * During the activation, CBS checks if it can reuse the current task's
1227 * runtime and period. If the deadline of the task is in the past, CBS
1228 * cannot use the runtime, and so it replenishes the task. This rule
1229 * works fine for implicit deadline tasks (deadline == period), and the
1230 * CBS was designed for implicit deadline tasks. However, a task with
1231 * constrained deadline (deadline < period) might be awakened after the
1232 * deadline, but before the next period. In this case, replenishing the
1233 * task would allow it to run for runtime / deadline. As in this case
1234 * deadline < period, CBS enables a task to run for more than the
1235 * runtime / period. In a very loaded system, this can cause a domino
1236 * effect, making other tasks miss their deadlines.
1237 *
1238 * To avoid this problem, in the activation of a constrained deadline
1239 * task after the deadline but before the next period, throttle the
1240 * task and set the replenishing timer to the begin of the next period,
1241 * unless it is boosted.
1242 */
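/*
 * As an illustration (made-up numbers): a constrained task with
 * runtime = 5ms, deadline = 20ms and period = 100ms that wakes up 30ms into
 * its period is past its deadline but before the next period. A plain
 * replenishment would let it run 5ms within a 20ms window, i.e. at density
 * 0.25 instead of the admitted 0.05 bandwidth; instead, it is throttled
 * here until the next period begins.
 */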
1243 static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1244 {
1245 struct task_struct *p = dl_task_of(dl_se);
1246 struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1247
1248 if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1249 dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1250 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
1251 return;
1252 dl_se->dl_throttled = 1;
1253 if (dl_se->runtime > 0)
1254 dl_se->runtime = 0;
1255 }
1256 }
1257
1258 static
1259 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1260 {
1261 return (dl_se->runtime <= 0);
1262 }
1263
1264 /*
1265 * This function implements the GRUB accounting rule:
1266 * according to the GRUB reclaiming algorithm, the runtime is
1267 * not decreased as "dq = -dt", but as
1268 * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
1269 * where u is the utilization of the task, Umax is the maximum reclaimable
1270 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1271 * as the difference between the "total runqueue utilization" and the
1272 * runqueue active utilization, and Uextra is the (per runqueue) extra
1273 * reclaimable utilization.
1274 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
1275 * multiplied by 2^BW_SHIFT, the result has to be shifted right by
1276 * BW_SHIFT.
1277 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
1278 * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1279 * Since delta is a 64 bit variable, to have an overflow its value
1280 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
1281 * So, overflow is not an issue here.
1282 */
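/*
 * A numeric sketch of the rule above (assumed values): for a task with
 * utilization u = 0.25 on a runqueue where Umax = 0.95, Uinact = 0.1 and
 * Uextra = 0.2, u / Umax ~= 0.26 while 1 - Uinact - Uextra = 0.7, so the
 * runtime is depleted at 0.7 of wall-clock speed: a 1ms tick consumes about
 * 0.7ms of budget, letting the task reclaim bandwidth left unused by others.
 */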
1283 static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1284 {
1285 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1286 u64 u_act;
1287 u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
1288
1289 /*
1290 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
1291 * we compare u_inact + rq->dl.extra_bw with
1292 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
1293 * u_inact + rq->dl.extra_bw can be larger than
1294 * 1 * (so, 1 - u_inact - rq->dl.extra_bw would be negative
1295 * leading to wrong results)
1296 */
1297 if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1298 u_act = u_act_min;
1299 else
1300 u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
1301
1302 return (delta * u_act) >> BW_SHIFT;
1303 }
1304
1305 /*
1306 * Update the current task's runtime statistics (provided it is still
1307 * a -deadline task and has not been removed from the dl_rq).
1308 */
1309 static void update_curr_dl(struct rq *rq)
1310 {
1311 struct task_struct *curr = rq->curr;
1312 struct sched_dl_entity *dl_se = &curr->dl;
1313 u64 delta_exec, scaled_delta_exec;
1314 int cpu = cpu_of(rq);
1315 u64 now;
1316
1317 if (!dl_task(curr) || !on_dl_rq(dl_se))
1318 return;
1319
1320 /*
1321 * Consumed budget is computed considering the time as
1322 * observed by schedulable tasks (excluding time spent
1323 * in hardirq context, etc.). Deadlines are instead
1324 * computed using hard walltime. This seems to be the more
1325 * natural solution, but the full ramifications of this
1326 * approach need further study.
1327 */
1328 now = rq_clock_task(rq);
1329 delta_exec = now - curr->se.exec_start;
1330 if (unlikely((s64)delta_exec <= 0)) {
1331 if (unlikely(dl_se->dl_yielded))
1332 goto throttle;
1333 return;
1334 }
1335
1336 schedstat_set(curr->stats.exec_max,
1337 max(curr->stats.exec_max, delta_exec));
1338
1339 trace_sched_stat_runtime(curr, delta_exec, 0);
1340
1341 update_current_exec_runtime(curr, now, delta_exec);
1342
1343 if (dl_entity_is_special(dl_se))
1344 return;
1345
1346 /*
1347 * For tasks that participate in GRUB, we implement GRUB-PA: the
1348 * spare reclaimed bandwidth is used to clock down frequency.
1349 *
1350 * For the others, we still need to scale reservation parameters
1351 * according to current frequency and CPU maximum capacity.
1352 */
1353 if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1354 scaled_delta_exec = grub_reclaim(delta_exec,
1355 rq,
1356 &curr->dl);
1357 } else {
1358 unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1359 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1360
1361 scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1362 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1363 }
1364
1365 dl_se->runtime -= scaled_delta_exec;
1366
1367 throttle:
1368 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1369 dl_se->dl_throttled = 1;
1370
1371 /* If requested, inform the user about runtime overruns. */
1372 if (dl_runtime_exceeded(dl_se) &&
1373 (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1374 dl_se->dl_overrun = 1;
1375
1376 __dequeue_task_dl(rq, curr, 0);
1377 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
1378 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1379
1380 if (!is_leftmost(curr, &rq->dl))
1381 resched_curr(rq);
1382 }
1383
1384 /*
1385 * Because -- for now -- we share the rt bandwidth, we need to
1386 * account our runtime there too, otherwise actual rt tasks
1387 * would be able to exceed the shared quota.
1388 *
1389 * Account to the root rt group for now.
1390 *
1391 * The solution we're working towards is having the RT groups scheduled
1392 * using deadline servers -- however there's a few nasties to figure
1393 * out before that can happen.
1394 */
1395 if (rt_bandwidth_enabled()) {
1396 struct rt_rq *rt_rq = &rq->rt;
1397
1398 raw_spin_lock(&rt_rq->rt_runtime_lock);
1399 /*
1400 * We'll let actual RT tasks worry about the overflow here, we
1401 * have our own CBS to keep us inline; only account when RT
1402 * bandwidth is relevant.
1403 */
1404 if (sched_rt_bandwidth_account(rt_rq))
1405 rt_rq->rt_time += delta_exec;
1406 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1407 }
1408 }
1409
1410 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1411 {
1412 struct sched_dl_entity *dl_se = container_of(timer,
1413 struct sched_dl_entity,
1414 inactive_timer);
1415 struct task_struct *p = dl_task_of(dl_se);
1416 struct rq_flags rf;
1417 struct rq *rq;
1418
1419 rq = task_rq_lock(p, &rf);
1420
1421 sched_clock_tick();
1422 update_rq_clock(rq);
1423
1424 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
1425 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1426
1427 if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
1428 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1429 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1430 dl_se->dl_non_contending = 0;
1431 }
1432
1433 raw_spin_lock(&dl_b->lock);
1434 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1435 raw_spin_unlock(&dl_b->lock);
1436 __dl_clear_params(p);
1437
1438 goto unlock;
1439 }
1440 if (dl_se->dl_non_contending == 0)
1441 goto unlock;
1442
1443 sub_running_bw(dl_se, &rq->dl);
1444 dl_se->dl_non_contending = 0;
1445 unlock:
1446 task_rq_unlock(rq, p, &rf);
1447 put_task_struct(p);
1448
1449 return HRTIMER_NORESTART;
1450 }
1451
1452 void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1453 {
1454 struct hrtimer *timer = &dl_se->inactive_timer;
1455
1456 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1457 timer->function = inactive_task_timer;
1458 }
1459
1460 #define __node_2_dle(node) \
1461 rb_entry((node), struct sched_dl_entity, rb_node)
1462
1463 #ifdef CONFIG_SMP
1464
1465 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1466 {
1467 struct rq *rq = rq_of_dl_rq(dl_rq);
1468
1469 if (dl_rq->earliest_dl.curr == 0 ||
1470 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1471 if (dl_rq->earliest_dl.curr == 0)
1472 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
1473 dl_rq->earliest_dl.curr = deadline;
1474 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1475 }
1476 }
1477
1478 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1479 {
1480 struct rq *rq = rq_of_dl_rq(dl_rq);
1481
1482 /*
1483 * Since we may have removed our earliest (and/or next earliest)
1484 * task we must recompute them.
1485 */
1486 if (!dl_rq->dl_nr_running) {
1487 dl_rq->earliest_dl.curr = 0;
1488 dl_rq->earliest_dl.next = 0;
1489 cpudl_clear(&rq->rd->cpudl, rq->cpu);
1490 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1491 } else {
1492 struct rb_node *leftmost = rb_first_cached(&dl_rq->root);
1493 struct sched_dl_entity *entry = __node_2_dle(leftmost);
1494
1495 dl_rq->earliest_dl.curr = entry->deadline;
1496 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1497 }
1498 }
1499
1500 #else
1501
1502 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1503 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1504
1505 #endif /* CONFIG_SMP */
1506
1507 static inline
1508 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1509 {
1510 int prio = dl_task_of(dl_se)->prio;
1511 u64 deadline = dl_se->deadline;
1512
1513 WARN_ON(!dl_prio(prio));
1514 dl_rq->dl_nr_running++;
1515 add_nr_running(rq_of_dl_rq(dl_rq), 1);
1516
1517 inc_dl_deadline(dl_rq, deadline);
1518 inc_dl_migration(dl_se, dl_rq);
1519 }
1520
1521 static inline
1522 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1523 {
1524 int prio = dl_task_of(dl_se)->prio;
1525
1526 WARN_ON(!dl_prio(prio));
1527 WARN_ON(!dl_rq->dl_nr_running);
1528 dl_rq->dl_nr_running--;
1529 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1530
1531 dec_dl_deadline(dl_rq, dl_se->deadline);
1532 dec_dl_migration(dl_se, dl_rq);
1533 }
1534
1535 static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
1536 {
1537 return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
1538 }
1539
1540 static inline struct sched_statistics *
1541 __schedstats_from_dl_se(struct sched_dl_entity *dl_se)
1542 {
1543 return &dl_task_of(dl_se)->stats;
1544 }
1545
1546 static inline void
1547 update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1548 {
1549 struct sched_statistics *stats;
1550
1551 if (!schedstat_enabled())
1552 return;
1553
1554 stats = __schedstats_from_dl_se(dl_se);
1555 __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1556 }
1557
1558 static inline void
1559 update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1560 {
1561 struct sched_statistics *stats;
1562
1563 if (!schedstat_enabled())
1564 return;
1565
1566 stats = __schedstats_from_dl_se(dl_se);
1567 __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1568 }
1569
1570 static inline void
1571 update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1572 {
1573 struct sched_statistics *stats;
1574
1575 if (!schedstat_enabled())
1576 return;
1577
1578 stats = __schedstats_from_dl_se(dl_se);
1579 __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1580 }
1581
1582 static inline void
1583 update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1584 int flags)
1585 {
1586 if (!schedstat_enabled())
1587 return;
1588
1589 if (flags & ENQUEUE_WAKEUP)
1590 update_stats_enqueue_sleeper_dl(dl_rq, dl_se);
1591 }
1592
1593 static inline void
1594 update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1595 int flags)
1596 {
1597 struct task_struct *p = dl_task_of(dl_se);
1598
1599 if (!schedstat_enabled())
1600 return;
1601
1602 if ((flags & DEQUEUE_SLEEP)) {
1603 unsigned int state;
1604
1605 state = READ_ONCE(p->__state);
1606 if (state & TASK_INTERRUPTIBLE)
1607 __schedstat_set(p->stats.sleep_start,
1608 rq_clock(rq_of_dl_rq(dl_rq)));
1609
1610 if (state & TASK_UNINTERRUPTIBLE)
1611 __schedstat_set(p->stats.block_start,
1612 rq_clock(rq_of_dl_rq(dl_rq)));
1613 }
1614 }
1615
1616 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1617 {
1618 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1619
1620 WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->rb_node));
1621
1622 rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
1623
1624 inc_dl_tasks(dl_se, dl_rq);
1625 }
1626
1627 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1628 {
1629 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1630
1631 if (RB_EMPTY_NODE(&dl_se->rb_node))
1632 return;
1633
1634 rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1635
1636 RB_CLEAR_NODE(&dl_se->rb_node);
1637
1638 dec_dl_tasks(dl_se, dl_rq);
1639 }
1640
1641 static void
1642 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
1643 {
1644 WARN_ON_ONCE(on_dl_rq(dl_se));
1645
1646 update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);
1647
1648 /*
1649 * If this is a wakeup or a new instance, the scheduling
1650 * parameters of the task might need updating. Otherwise,
1651 * we want a replenishment of its runtime.
1652 */
1653 if (flags & ENQUEUE_WAKEUP) {
1654 task_contending(dl_se, flags);
1655 update_dl_entity(dl_se);
1656 } else if (flags & ENQUEUE_REPLENISH) {
1657 replenish_dl_entity(dl_se);
1658 } else if ((flags & ENQUEUE_RESTORE) &&
1659 dl_time_before(dl_se->deadline,
1660 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1661 setup_new_dl_entity(dl_se);
1662 }
1663
1664 __enqueue_dl_entity(dl_se);
1665 }
1666
1667 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1668 {
1669 __dequeue_dl_entity(dl_se);
1670 }
1671
1672 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1673 {
1674 if (is_dl_boosted(&p->dl)) {
1675 /*
1676 * Because of delays in the detection of the overrun of a
1677 * thread's runtime, it might be the case that a thread
1678 * goes to sleep in a rt mutex with negative runtime. As
1679 * a consequence, the thread will be throttled.
1680 *
1681 * While waiting for the mutex, this thread can also be
1682 * boosted via PI, resulting in a thread that is throttled
1683 * and boosted at the same time.
1684 *
1685 * In this case, the boost overrides the throttle.
1686 */
1687 if (p->dl.dl_throttled) {
1688 /*
1689 * The replenish timer needs to be canceled. No
1690 * problem if it fires concurrently: boosted threads
1691 * are ignored in dl_task_timer().
1692 */
1693 hrtimer_try_to_cancel(&p->dl.dl_timer);
1694 p->dl.dl_throttled = 0;
1695 }
1696 } else if (!dl_prio(p->normal_prio)) {
1697 /*
1698 * Special case in which we have a !SCHED_DEADLINE task that is going
1699 * to be deboosted, but exceeds its runtime while doing so. No point in
1700 * replenishing it, as it's going to return back to its original
1701 * scheduling class after this. If it has been throttled, we need to
1702 * clear the flag, otherwise the task may wake up as throttled after
1703 * being boosted again with no means to replenish the runtime and clear
1704 * the throttle.
1705 */
1706 p->dl.dl_throttled = 0;
1707 if (!(flags & ENQUEUE_REPLENISH))
1708 printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
1709 task_pid_nr(p));
1710
1711 return;
1712 }
1713
1714 /*
1715 * Check if a constrained deadline task was activated
1716 * after the deadline but before the next period.
1717 * If that is the case, the task will be throttled and
1718 * the replenishment timer will be set to the next period.
1719 */
1720 if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1721 dl_check_constrained_dl(&p->dl);
1722
1723 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
1724 add_rq_bw(&p->dl, &rq->dl);
1725 add_running_bw(&p->dl, &rq->dl);
1726 }
1727
1728 /*
1729 * If p is throttled, we do not enqueue it. In fact, if it exhausted
1730 * its budget it needs a replenishment and, since it now is on
1731 * its rq, the bandwidth timer callback (which clearly has not
1732 * run yet) will take care of this.
1733 * However, the active utilization does not depend on the fact
1734 * that the task is on the runqueue or not (but depends on the
1735 * task's state - in GRUB parlance, "inactive" vs "active contending").
1736 * In other words, even if a task is throttled its utilization must
1737 * be counted in the active utilization; hence, we need to call
1738 * add_running_bw().
1739 */
1740 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1741 if (flags & ENQUEUE_WAKEUP)
1742 task_contending(&p->dl, flags);
1743
1744 return;
1745 }
1746
1747 check_schedstat_required();
1748 update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);
1749
1750 enqueue_dl_entity(&p->dl, flags);
1751
1752 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1753 enqueue_pushable_dl_task(rq, p);
1754 }
1755
1756 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1757 {
1758 update_stats_dequeue_dl(&rq->dl, &p->dl, flags);
1759 dequeue_dl_entity(&p->dl);
1760 dequeue_pushable_dl_task(rq, p);
1761 }
1762
1763 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1764 {
1765 update_curr_dl(rq);
1766 __dequeue_task_dl(rq, p, flags);
1767
1768 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
1769 sub_running_bw(&p->dl, &rq->dl);
1770 sub_rq_bw(&p->dl, &rq->dl);
1771 }
1772
1773 /*
1774 * This check allows us to start the inactive timer (or to immediately
1775 * decrease the active utilization, if needed) in two cases:
1776 * when the task blocks and when it is terminating
1777 * (p->state == TASK_DEAD). We can handle the two cases in the same
1778 * way, because from GRUB's point of view the same thing is happening
1779 * (the task moves from "active contending" to "active non contending"
1780 * or "inactive")
1781 */
1782 if (flags & DEQUEUE_SLEEP)
1783 task_non_contending(p);
1784 }
1785
1786 /*
1787 * Yield task semantic for -deadline tasks is:
1788 *
1789 * get off the CPU until our next instance, with
1790 * a new runtime. This is of little use now, since we
1791 * don't have a bandwidth reclaiming mechanism. Anyway,
1792 * bandwidth reclaiming is planned for the future, and
1793 * yield_task_dl will indicate that some spare budget
1794 * is available for other task instances to use.
1795 */
1796 static void yield_task_dl(struct rq *rq)
1797 {
1798 /*
1799 * We make the task go to sleep until its current deadline by
1800 * forcing its runtime to zero. This way, update_curr_dl() stops
1801 * it and the bandwidth timer will wake it up and will give it
1802 * new scheduling parameters (thanks to dl_yielded=1).
1803 */
1804 rq->curr->dl.dl_yielded = 1;
1805
1806 update_rq_clock(rq);
1807 update_curr_dl(rq);
1808 /*
1809 * Tell update_rq_clock() that we've just updated,
1810 * so we don't do microscopic update in schedule()
1811 * and double the fastpath cost.
1812 */
1813 rq_clock_skip_update(rq);
1814 }
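
/*
 * Illustrative sketch (userspace, not part of this file): a periodic
 * SCHED_DEADLINE job that finishes its work early can call sched_yield()
 * to give up the remainder of its budget until the next period:
 *
 *	#include <sched.h>
 *
 *	for (;;) {
 *		do_instance_work();	// hypothetical per-instance work
 *		sched_yield();		// throttled until the next period,
 *					// then the full runtime is restored
 *	}
 */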
1815
1816 #ifdef CONFIG_SMP
1817
1818 static inline bool dl_task_is_earliest_deadline(struct task_struct *p,
1819 struct rq *rq)
1820 {
1821 return (!rq->dl.dl_nr_running ||
1822 dl_time_before(p->dl.deadline,
1823 rq->dl.earliest_dl.curr));
1824 }
1825
1826 static int find_later_rq(struct task_struct *task);
1827
1828 static int
1829 select_task_rq_dl(struct task_struct *p, int cpu, int flags)
1830 {
1831 struct task_struct *curr;
1832 bool select_rq;
1833 struct rq *rq;
1834
1835 if (!(flags & WF_TTWU))
1836 goto out;
1837
1838 rq = cpu_rq(cpu);
1839
1840 rcu_read_lock();
1841 curr = READ_ONCE(rq->curr); /* unlocked access */
1842
1843 /*
1844 * If we are dealing with a -deadline task, we must
1845 * decide where to wake it up.
1846 * If it has a later deadline and the current task
1847 * on this rq can't move (provided the waking task
1848 * can!) we prefer to send it somewhere else. On the
1849 * other hand, if it has a shorter deadline, we
1850 * try to make it stay here, it might be important.
1851 */
1852 select_rq = unlikely(dl_task(curr)) &&
1853 (curr->nr_cpus_allowed < 2 ||
1854 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1855 p->nr_cpus_allowed > 1;
1856
1857 /*
1858 * Take the capacity of the CPU into account to
1859 * ensure it fits the requirement of the task.
1860 */
1861 if (sched_asym_cpucap_active())
1862 select_rq |= !dl_task_fits_capacity(p, cpu);
1863
1864 if (select_rq) {
1865 int target = find_later_rq(p);
1866
1867 if (target != -1 &&
1868 dl_task_is_earliest_deadline(p, cpu_rq(target)))
1869 cpu = target;
1870 }
1871 rcu_read_unlock();
1872
1873 out:
1874 return cpu;
1875 }
1876
1877 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1878 {
1879 struct rq_flags rf;
1880 struct rq *rq;
1881
1882 if (READ_ONCE(p->__state) != TASK_WAKING)
1883 return;
1884
1885 rq = task_rq(p);
1886 /*
1887 * Since p->state == TASK_WAKING, set_task_cpu() has been called
1888 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1889 * rq->lock is not... So, lock it
1890 */
1891 rq_lock(rq, &rf);
1892 if (p->dl.dl_non_contending) {
1893 update_rq_clock(rq);
1894 sub_running_bw(&p->dl, &rq->dl);
1895 p->dl.dl_non_contending = 0;
1896 /*
1897 * If the timer handler is currently running and the
1898 * timer cannot be canceled, inactive_task_timer()
1899 * will see that dl_non_contending is not set, and
1900 * will not touch the rq's active utilization,
1901 * so we are still safe.
1902 */
1903 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1904 put_task_struct(p);
1905 }
1906 sub_rq_bw(&p->dl, &rq->dl);
1907 rq_unlock(rq, &rf);
1908 }
1909
1910 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1911 {
1912 /*
1913 * Current can't be migrated, useless to reschedule,
1914 * let's hope p can move out.
1915 */
1916 if (rq->curr->nr_cpus_allowed == 1 ||
1917 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1918 return;
1919
1920 /*
1921 * p is migratable, so let's not schedule it and
1922 * see if it is pushed or pulled somewhere else.
1923 */
1924 if (p->nr_cpus_allowed != 1 &&
1925 cpudl_find(&rq->rd->cpudl, p, NULL))
1926 return;
1927
1928 resched_curr(rq);
1929 }
1930
1931 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1932 {
1933 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
1934 /*
1935 * This is OK, because current is on_cpu, which avoids it being
1936 * picked for load-balance and preemption/IRQs are still
1937 * disabled avoiding further scheduler activity on it and we've
1938 * not yet started the picking loop.
1939 */
1940 rq_unpin_lock(rq, rf);
1941 pull_dl_task(rq);
1942 rq_repin_lock(rq, rf);
1943 }
1944
1945 return sched_stop_runnable(rq) || sched_dl_runnable(rq);
1946 }
1947 #endif /* CONFIG_SMP */
1948
1949 /*
1950 * Only called when both the current and waking task are -deadline
1951 * tasks.
1952 */
1953 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1954 int flags)
1955 {
1956 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1957 resched_curr(rq);
1958 return;
1959 }
1960
1961 #ifdef CONFIG_SMP
1962 /*
1963 * In the unlikely case current and p have the same deadline
1964 * let us try to decide what's the best thing to do...
1965 */
1966 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1967 !test_tsk_need_resched(rq->curr))
1968 check_preempt_equal_dl(rq, p);
1969 #endif /* CONFIG_SMP */
1970 }
1971
1972 #ifdef CONFIG_SCHED_HRTICK
1973 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1974 {
1975 hrtick_start(rq, p->dl.runtime);
1976 }
1977 #else /* !CONFIG_SCHED_HRTICK */
1978 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1979 {
1980 }
1981 #endif
1982
1983 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
1984 {
1985 struct sched_dl_entity *dl_se = &p->dl;
1986 struct dl_rq *dl_rq = &rq->dl;
1987
1988 p->se.exec_start = rq_clock_task(rq);
1989 if (on_dl_rq(&p->dl))
1990 update_stats_wait_end_dl(dl_rq, dl_se);
1991
1992 /* You can't push away the running task */
1993 dequeue_pushable_dl_task(rq, p);
1994
1995 if (!first)
1996 return;
1997
1998 if (hrtick_enabled_dl(rq))
1999 start_hrtick_dl(rq, p);
2000
2001 if (rq->curr->sched_class != &dl_sched_class)
2002 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2003
2004 deadline_queue_push_tasks(rq);
2005 }
2006
2007 static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
2008 {
2009 struct rb_node *left = rb_first_cached(&dl_rq->root);
2010
2011 if (!left)
2012 return NULL;
2013
2014 return __node_2_dle(left);
2015 }
2016
2017 static struct task_struct *pick_task_dl(struct rq *rq)
2018 {
2019 struct sched_dl_entity *dl_se;
2020 struct dl_rq *dl_rq = &rq->dl;
2021 struct task_struct *p;
2022
2023 if (!sched_dl_runnable(rq))
2024 return NULL;
2025
2026 dl_se = pick_next_dl_entity(dl_rq);
2027 WARN_ON_ONCE(!dl_se);
2028 p = dl_task_of(dl_se);
2029
2030 return p;
2031 }
2032
2033 static struct task_struct *pick_next_task_dl(struct rq *rq)
2034 {
2035 struct task_struct *p;
2036
2037 p = pick_task_dl(rq);
2038 if (p)
2039 set_next_task_dl(rq, p, true);
2040
2041 return p;
2042 }
2043
2044 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
2045 {
2046 struct sched_dl_entity *dl_se = &p->dl;
2047 struct dl_rq *dl_rq = &rq->dl;
2048
2049 if (on_dl_rq(&p->dl))
2050 update_stats_wait_start_dl(dl_rq, dl_se);
2051
2052 update_curr_dl(rq);
2053
2054 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2055 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
2056 enqueue_pushable_dl_task(rq, p);
2057 }
2058
2059 /*
2060 * scheduler tick hitting a task of our scheduling class.
2061 *
2062 * NOTE: This function can be called remotely by the tick offload that
2063 * goes along full dynticks. Therefore no local assumption can be made
2064 * and everything must be accessed through the @rq and @curr passed in
2065 * parameters.
2066 */
2067 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
2068 {
2069 update_curr_dl(rq);
2070
2071 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2072 /*
2073 * Even when we have runtime, update_curr_dl() might have resulted in us
2074 * not being the leftmost task anymore. In that case NEED_RESCHED will
2075 * be set and schedule() will start a new hrtick for the next task.
2076 */
2077 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
2078 is_leftmost(p, &rq->dl))
2079 start_hrtick_dl(rq, p);
2080 }
2081
2082 static void task_fork_dl(struct task_struct *p)
2083 {
2084 /*
2085 * SCHED_DEADLINE tasks cannot fork and this is achieved through
2086 * sched_fork()
2087 */
2088 }
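
/*
 * For reference (behaviour implemented in sched_fork(), assuming the core
 * code this file pairs with): fork by a SCHED_DEADLINE task fails with
 * -EAGAIN unless SCHED_FLAG_RESET_ON_FORK is set, in which case the child
 * simply starts out as a normal task; either way there is nothing left to
 * do here.
 */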
2089
2090 #ifdef CONFIG_SMP
2091
2092 /* Only try algorithms three times */
2093 #define DL_MAX_TRIES 3
2094
2095 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
2096 {
2097 if (!task_on_cpu(rq, p) &&
2098 cpumask_test_cpu(cpu, &p->cpus_mask))
2099 return 1;
2100 return 0;
2101 }
2102
2103 /*
2104 * Return the earliest-deadline pushable task of @rq that is allowed
2105 * to run on @cpu, or NULL if there is none:
2106 */
2107 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
2108 {
2109 struct task_struct *p = NULL;
2110 struct rb_node *next_node;
2111
2112 if (!has_pushable_dl_tasks(rq))
2113 return NULL;
2114
2115 next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);
2116
2117 next_node:
2118 if (next_node) {
2119 p = __node_2_pdl(next_node);
2120
2121 if (pick_dl_task(rq, p, cpu))
2122 return p;
2123
2124 next_node = rb_next(next_node);
2125 goto next_node;
2126 }
2127
2128 return NULL;
2129 }
2130
2131 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
2132
2133 static int find_later_rq(struct task_struct *task)
2134 {
2135 struct sched_domain *sd;
2136 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
2137 int this_cpu = smp_processor_id();
2138 int cpu = task_cpu(task);
2139
2140 /* Make sure the mask is initialized first */
2141 if (unlikely(!later_mask))
2142 return -1;
2143
2144 if (task->nr_cpus_allowed == 1)
2145 return -1;
2146
2147 /*
2148 * We have to consider system topology and task affinity
2149 * first, then we can look for a suitable CPU.
2150 */
2151 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
2152 return -1;
2153
2154 /*
2155 * If we are here, some targets have been found, including
2156 * the most suitable one: among the runqueues whose current
2157 * tasks have later deadlines than this task's, it is the rq
2158 * with the latest possible one.
2159 *
2160 * Now we check how well this matches with task's
2161 * affinity and system topology.
2162 *
2163 * The last CPU where the task ran is our first
2164 * guess, since it is most likely cache-hot there.
2165 */
2166 if (cpumask_test_cpu(cpu, later_mask))
2167 return cpu;
2168 /*
2169 * Check if this_cpu is to be skipped (i.e., it is
2170 * not in the mask) or not.
2171 */
2172 if (!cpumask_test_cpu(this_cpu, later_mask))
2173 this_cpu = -1;
2174
2175 rcu_read_lock();
2176 for_each_domain(cpu, sd) {
2177 if (sd->flags & SD_WAKE_AFFINE) {
2178 int best_cpu;
2179
2180 /*
2181 * If possible, preempting this_cpu is
2182 * cheaper than migrating.
2183 */
2184 if (this_cpu != -1 &&
2185 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2186 rcu_read_unlock();
2187 return this_cpu;
2188 }
2189
2190 best_cpu = cpumask_any_and_distribute(later_mask,
2191 sched_domain_span(sd));
2192 /*
2193 * Last chance: if a CPU in both later_mask
2194 * and the current sd span is valid, that becomes our
2195 * choice. Of course, the latest possible CPU is
2196 * already under consideration through later_mask.
2197 */
2198 if (best_cpu < nr_cpu_ids) {
2199 rcu_read_unlock();
2200 return best_cpu;
2201 }
2202 }
2203 }
2204 rcu_read_unlock();
2205
2206 /*
2207 * At this point, all our guesses failed, so we just return
2208 * 'something' and let the caller sort things out.
2209 */
2210 if (this_cpu != -1)
2211 return this_cpu;
2212
2213 cpu = cpumask_any_distribute(later_mask);
2214 if (cpu < nr_cpu_ids)
2215 return cpu;
2216
2217 return -1;
2218 }
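
/*
 * Summary of the preference order implemented above: (1) the task's
 * previous CPU if it is in later_mask (likely cache-hot), (2) this_cpu
 * if it is in later_mask and shares a SD_WAKE_AFFINE domain with that
 * CPU (preempting locally is cheaper than migrating), (3) any later_mask
 * CPU inside such a domain, (4) this_cpu if it is in later_mask,
 * (5) any CPU of later_mask, (6) -1 if nothing suitable was found.
 */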
2219
2220 /* Locks the rq it finds */
2221 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2222 {
2223 struct rq *later_rq = NULL;
2224 int tries;
2225 int cpu;
2226
2227 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2228 cpu = find_later_rq(task);
2229
2230 if ((cpu == -1) || (cpu == rq->cpu))
2231 break;
2232
2233 later_rq = cpu_rq(cpu);
2234
2235 if (!dl_task_is_earliest_deadline(task, later_rq)) {
2236 /*
2237 * Target rq has tasks of equal or earlier deadline,
2238 * retrying does not release any lock and is unlikely
2239 * to yield a different result.
2240 */
2241 later_rq = NULL;
2242 break;
2243 }
2244
2245 /* Retry if something changed. */
2246 if (double_lock_balance(rq, later_rq)) {
2247 if (unlikely(task_rq(task) != rq ||
2248 !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
2249 task_on_cpu(rq, task) ||
2250 !dl_task(task) ||
2251 is_migration_disabled(task) ||
2252 !task_on_rq_queued(task))) {
2253 double_unlock_balance(rq, later_rq);
2254 later_rq = NULL;
2255 break;
2256 }
2257 }
2258
2259 /*
2260 * If the rq we found has no -deadline task, or
2261 * its earliest one has a later deadline than our
2262 * task, the rq is a good one.
2263 */
2264 if (dl_task_is_earliest_deadline(task, later_rq))
2265 break;
2266
2267 /* Otherwise we try again. */
2268 double_unlock_balance(rq, later_rq);
2269 later_rq = NULL;
2270 }
2271
2272 return later_rq;
2273 }
2274
2275 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2276 {
2277 struct task_struct *p;
2278
2279 if (!has_pushable_dl_tasks(rq))
2280 return NULL;
2281
2282 p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
2283
2284 WARN_ON_ONCE(rq->cpu != task_cpu(p));
2285 WARN_ON_ONCE(task_current(rq, p));
2286 WARN_ON_ONCE(p->nr_cpus_allowed <= 1);
2287
2288 WARN_ON_ONCE(!task_on_rq_queued(p));
2289 WARN_ON_ONCE(!dl_task(p));
2290
2291 return p;
2292 }
2293
2294 /*
2295 * See if the non-running -deadline tasks on this rq
2296 * can be sent to some other CPU where they can preempt
2297 * and start executing.
2298 */
2299 static int push_dl_task(struct rq *rq)
2300 {
2301 struct task_struct *next_task;
2302 struct rq *later_rq;
2303 int ret = 0;
2304
2305 if (!rq->dl.overloaded)
2306 return 0;
2307
2308 next_task = pick_next_pushable_dl_task(rq);
2309 if (!next_task)
2310 return 0;
2311
2312 retry:
2313 /*
2314 * If next_task preempts rq->curr, and rq->curr
2315 * can move away, it makes sense to just reschedule
2316 * without going further in pushing next_task.
2317 */
2318 if (dl_task(rq->curr) &&
2319 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2320 rq->curr->nr_cpus_allowed > 1) {
2321 resched_curr(rq);
2322 return 0;
2323 }
2324
2325 if (is_migration_disabled(next_task))
2326 return 0;
2327
2328 if (WARN_ON(next_task == rq->curr))
2329 return 0;
2330
2331 /* We might release rq lock */
2332 get_task_struct(next_task);
2333
2334 /* Will lock the rq it'll find */
2335 later_rq = find_lock_later_rq(next_task, rq);
2336 if (!later_rq) {
2337 struct task_struct *task;
2338
2339 /*
2340 * We must check all this again, since
2341 * find_lock_later_rq releases rq->lock and it is
2342 * then possible that next_task has migrated.
2343 */
2344 task = pick_next_pushable_dl_task(rq);
2345 if (task == next_task) {
2346 /*
2347 * The task is still there. We don't try
2348 * again, some other CPU will pull it when ready.
2349 */
2350 goto out;
2351 }
2352
2353 if (!task)
2354 /* No more tasks */
2355 goto out;
2356
2357 put_task_struct(next_task);
2358 next_task = task;
2359 goto retry;
2360 }
2361
2362 deactivate_task(rq, next_task, 0);
2363 set_task_cpu(next_task, later_rq->cpu);
2364 activate_task(later_rq, next_task, 0);
2365 ret = 1;
2366
2367 resched_curr(later_rq);
2368
2369 double_unlock_balance(rq, later_rq);
2370
2371 out:
2372 put_task_struct(next_task);
2373
2374 return ret;
2375 }
2376
2377 static void push_dl_tasks(struct rq *rq)
2378 {
2379 /* push_dl_task() will return true if it moved a -deadline task */
2380 while (push_dl_task(rq))
2381 ;
2382 }
2383
2384 static void pull_dl_task(struct rq *this_rq)
2385 {
2386 int this_cpu = this_rq->cpu, cpu;
2387 struct task_struct *p, *push_task;
2388 bool resched = false;
2389 struct rq *src_rq;
2390 u64 dmin = LONG_MAX;
2391
2392 if (likely(!dl_overloaded(this_rq)))
2393 return;
2394
2395 /*
2396 * Match the barrier from dl_set_overloaded; this guarantees that if we
2397 * see overloaded we must also see the dlo_mask bit.
2398 */
2399 smp_rmb();
2400
2401 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2402 if (this_cpu == cpu)
2403 continue;
2404
2405 src_rq = cpu_rq(cpu);
2406
2407 /*
2408 * It looks racy, and it is! However, as in sched_rt.c,
2409 * we are fine with this.
2410 */
2411 if (this_rq->dl.dl_nr_running &&
2412 dl_time_before(this_rq->dl.earliest_dl.curr,
2413 src_rq->dl.earliest_dl.next))
2414 continue;
2415
2416 /* Might drop this_rq->lock */
2417 push_task = NULL;
2418 double_lock_balance(this_rq, src_rq);
2419
2420 /*
2421 * If there are no more pullable tasks on the
2422 * rq, we're done with it.
2423 */
2424 if (src_rq->dl.dl_nr_running <= 1)
2425 goto skip;
2426
2427 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2428
2429 /*
2430 * We found a task to be pulled if:
2431 * - it preempts our current (if there's one),
2432 * - it will preempt the last one we pulled (if any).
2433 */
2434 if (p && dl_time_before(p->dl.deadline, dmin) &&
2435 dl_task_is_earliest_deadline(p, this_rq)) {
2436 WARN_ON(p == src_rq->curr);
2437 WARN_ON(!task_on_rq_queued(p));
2438
2439 /*
2440 * However, we skip p if it has an earlier deadline than the
2441 * current task of its runqueue: it is about to run there anyway.
2442 */
2443 if (dl_time_before(p->dl.deadline,
2444 src_rq->curr->dl.deadline))
2445 goto skip;
2446
2447 if (is_migration_disabled(p)) {
2448 push_task = get_push_task(src_rq);
2449 } else {
2450 deactivate_task(src_rq, p, 0);
2451 set_task_cpu(p, this_cpu);
2452 activate_task(this_rq, p, 0);
2453 dmin = p->dl.deadline;
2454 resched = true;
2455 }
2456
2457 /* Is there any other task even earlier? */
2458 }
2459 skip:
2460 double_unlock_balance(this_rq, src_rq);
2461
2462 if (push_task) {
2463 preempt_disable();
2464 raw_spin_rq_unlock(this_rq);
2465 stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2466 push_task, &src_rq->push_work);
2467 preempt_enable();
2468 raw_spin_rq_lock(this_rq);
2469 }
2470 }
2471
2472 if (resched)
2473 resched_curr(this_rq);
2474 }
2475
2476 /*
2477 * Since the task is not running and a reschedule is not going to happen
2478 * anytime soon on its runqueue, we try pushing it away now.
2479 */
2480 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2481 {
2482 if (!task_on_cpu(rq, p) &&
2483 !test_tsk_need_resched(rq->curr) &&
2484 p->nr_cpus_allowed > 1 &&
2485 dl_task(rq->curr) &&
2486 (rq->curr->nr_cpus_allowed < 2 ||
2487 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2488 push_dl_tasks(rq);
2489 }
2490 }
2491
2492 static void set_cpus_allowed_dl(struct task_struct *p,
2493 const struct cpumask *new_mask,
2494 u32 flags)
2495 {
2496 struct root_domain *src_rd;
2497 struct rq *rq;
2498
2499 WARN_ON_ONCE(!dl_task(p));
2500
2501 rq = task_rq(p);
2502 src_rd = rq->rd;
2503 /*
2504 * Migrating a SCHED_DEADLINE task between exclusive
2505 * cpusets (different root_domains) entails a bandwidth
2506 * update. We already made space for us in the destination
2507 * domain (see cpuset_can_attach()).
2508 */
2509 if (!cpumask_intersects(src_rd->span, new_mask)) {
2510 struct dl_bw *src_dl_b;
2511
2512 src_dl_b = dl_bw_of(cpu_of(rq));
2513 /*
2514 * We now free resources of the root_domain we are migrating
2515 * off. In the worst case, sched_setattr() may temporarily fail
2516 * until we complete the update.
2517 */
2518 raw_spin_lock(&src_dl_b->lock);
2519 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2520 raw_spin_unlock(&src_dl_b->lock);
2521 }
2522
2523 set_cpus_allowed_common(p, new_mask, flags);
2524 }
2525
2526 /* Assumes rq->lock is held */
2527 static void rq_online_dl(struct rq *rq)
2528 {
2529 if (rq->dl.overloaded)
2530 dl_set_overload(rq);
2531
2532 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2533 if (rq->dl.dl_nr_running > 0)
2534 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2535 }
2536
2537 /* Assumes rq->lock is held */
2538 static void rq_offline_dl(struct rq *rq)
2539 {
2540 if (rq->dl.overloaded)
2541 dl_clear_overload(rq);
2542
2543 cpudl_clear(&rq->rd->cpudl, rq->cpu);
2544 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2545 }
2546
2547 void __init init_sched_dl_class(void)
2548 {
2549 unsigned int i;
2550
2551 for_each_possible_cpu(i)
2552 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2553 GFP_KERNEL, cpu_to_node(i));
2554 }
2555
2556 void dl_add_task_root_domain(struct task_struct *p)
2557 {
2558 struct rq_flags rf;
2559 struct rq *rq;
2560 struct dl_bw *dl_b;
2561
2562 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2563 if (!dl_task(p)) {
2564 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2565 return;
2566 }
2567
2568 rq = __task_rq_lock(p, &rf);
2569
2570 dl_b = &rq->rd->dl_bw;
2571 raw_spin_lock(&dl_b->lock);
2572
2573 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2574
2575 raw_spin_unlock(&dl_b->lock);
2576
2577 task_rq_unlock(rq, p, &rf);
2578 }
2579
2580 void dl_clear_root_domain(struct root_domain *rd)
2581 {
2582 unsigned long flags;
2583
2584 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2585 rd->dl_bw.total_bw = 0;
2586 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2587 }
2588
2589 #endif /* CONFIG_SMP */
2590
2591 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2592 {
2593 /*
2594 * task_non_contending() can start the "inactive timer" (if the 0-lag
2595 * time is in the future). If the task switches back to dl before
2596 * the "inactive timer" fires, it can continue to consume its current
2597 * runtime using its current deadline. If it stays outside of
2598 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2599 * will reset the task parameters.
2600 */
2601 if (task_on_rq_queued(p) && p->dl.dl_runtime)
2602 task_non_contending(p);
2603
2604 /*
2605 * In case a task is setscheduled out from SCHED_DEADLINE we need to
2606 * keep track of that on its cpuset (for correct bandwidth tracking).
2607 */
2608 dec_dl_tasks_cs(p);
2609
2610 if (!task_on_rq_queued(p)) {
2611 /*
2612 * Inactive timer is armed. However, p is leaving DEADLINE and
2613 * might migrate away from this rq while continuing to run on
2614 * some other class. We need to remove its contribution from
2615 * this rq running_bw now, or sub_rq_bw (below) will complain.
2616 */
2617 if (p->dl.dl_non_contending)
2618 sub_running_bw(&p->dl, &rq->dl);
2619 sub_rq_bw(&p->dl, &rq->dl);
2620 }
2621
2622 /*
2623 * We cannot use inactive_task_timer() to invoke sub_running_bw()
2624 * at the 0-lag time, because the task could have been migrated
2625 * while SCHED_OTHER in the meanwhile.
2626 */
2627 if (p->dl.dl_non_contending)
2628 p->dl.dl_non_contending = 0;
2629
2630 /*
2631 * Since this might be the only -deadline task on the rq,
2632 * this is the right place to try to pull some other one
2633 * from an overloaded CPU, if any.
2634 */
2635 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2636 return;
2637
2638 deadline_queue_pull_task(rq);
2639 }
2640
2641 /*
2642 * When switching to -deadline, we may overload the rq, then
2643 * we try to push someone off, if possible.
2644 */
2645 static void switched_to_dl(struct rq *rq, struct task_struct *p)
2646 {
2647 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2648 put_task_struct(p);
2649
2650 /*
2651 * In case a task is setscheduled to SCHED_DEADLINE we need to keep
2652 * track of that on its cpuset (for correct bandwidth tracking).
2653 */
2654 inc_dl_tasks_cs(p);
2655
2656 /* If p is not queued we will update its parameters at next wakeup. */
2657 if (!task_on_rq_queued(p)) {
2658 add_rq_bw(&p->dl, &rq->dl);
2659
2660 return;
2661 }
2662
2663 if (rq->curr != p) {
2664 #ifdef CONFIG_SMP
2665 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2666 deadline_queue_push_tasks(rq);
2667 #endif
2668 if (dl_task(rq->curr))
2669 check_preempt_curr_dl(rq, p, 0);
2670 else
2671 resched_curr(rq);
2672 } else {
2673 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2674 }
2675 }
2676
2677 /*
2678 * If the scheduling parameters of a -deadline task changed,
2679 * a push or pull operation might be needed.
2680 */
2681 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2682 int oldprio)
2683 {
2684 if (task_on_rq_queued(p) || task_current(rq, p)) {
2685 #ifdef CONFIG_SMP
2686 /*
2687 * This might be too much, but unfortunately
2688 * we don't have the old deadline value, and
2689 * we can't argue if the task is increasing
2690 * or lowering its prio, so...
2691 */
2692 if (!rq->dl.overloaded)
2693 deadline_queue_pull_task(rq);
2694
2695 /*
2696 * If we now have an earlier deadline task than p,
2697 * then reschedule, provided p is still on this
2698 * runqueue.
2699 */
2700 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2701 resched_curr(rq);
2702 #else
2703 /*
2704 * Again, we don't know if p has an earlier
2705 * or later deadline, so let's blindly set a
2706 * (maybe not needed) rescheduling point.
2707 */
2708 resched_curr(rq);
2709 #endif /* CONFIG_SMP */
2710 }
2711 }
2712
2713 DEFINE_SCHED_CLASS(dl) = {
2714
2715 .enqueue_task = enqueue_task_dl,
2716 .dequeue_task = dequeue_task_dl,
2717 .yield_task = yield_task_dl,
2718
2719 .check_preempt_curr = check_preempt_curr_dl,
2720
2721 .pick_next_task = pick_next_task_dl,
2722 .put_prev_task = put_prev_task_dl,
2723 .set_next_task = set_next_task_dl,
2724
2725 #ifdef CONFIG_SMP
2726 .balance = balance_dl,
2727 .pick_task = pick_task_dl,
2728 .select_task_rq = select_task_rq_dl,
2729 .migrate_task_rq = migrate_task_rq_dl,
2730 .set_cpus_allowed = set_cpus_allowed_dl,
2731 .rq_online = rq_online_dl,
2732 .rq_offline = rq_offline_dl,
2733 .task_woken = task_woken_dl,
2734 .find_lock_rq = find_lock_later_rq,
2735 #endif
2736
2737 .task_tick = task_tick_dl,
2738 .task_fork = task_fork_dl,
2739
2740 .prio_changed = prio_changed_dl,
2741 .switched_from = switched_from_dl,
2742 .switched_to = switched_to_dl,
2743
2744 .update_curr = update_curr_dl,
2745 };
2746
2747 /* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
2748 static u64 dl_generation;
2749
2750 int sched_dl_global_validate(void)
2751 {
2752 u64 runtime = global_rt_runtime();
2753 u64 period = global_rt_period();
2754 u64 new_bw = to_ratio(period, runtime);
2755 u64 gen = ++dl_generation;
2756 struct dl_bw *dl_b;
2757 int cpu, cpus, ret = 0;
2758 unsigned long flags;
2759
2760 /*
2761 * Here we want to check that the bandwidth is not being set to some
2762 * value smaller than the currently allocated bandwidth in
2763 * any of the root_domains.
2764 */
2765 for_each_possible_cpu(cpu) {
2766 rcu_read_lock_sched();
2767
2768 if (dl_bw_visited(cpu, gen))
2769 goto next;
2770
2771 dl_b = dl_bw_of(cpu);
2772 cpus = dl_bw_cpus(cpu);
2773
2774 raw_spin_lock_irqsave(&dl_b->lock, flags);
2775 if (new_bw * cpus < dl_b->total_bw)
2776 ret = -EBUSY;
2777 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2778
2779 next:
2780 rcu_read_unlock_sched();
2781
2782 if (ret)
2783 break;
2784 }
2785
2786 return ret;
2787 }
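
/*
 * Worked example (illustrative): with the default
 * /proc/sys/kernel/sched_rt_period_us = 1000000 and
 * /proc/sys/kernel/sched_rt_runtime_us = 950000, new_bw above is
 * roughly 0.95 in BW_SHIFT fixed point. Lowering sched_rt_runtime_us
 * is then refused with -EBUSY for as long as some root_domain already
 * has total_bw greater than new_bw times the number of its CPUs.
 */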
2788
2789 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2790 {
2791 if (global_rt_runtime() == RUNTIME_INF) {
2792 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2793 dl_rq->extra_bw = 1 << BW_SHIFT;
2794 } else {
2795 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2796 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2797 dl_rq->extra_bw = to_ratio(global_rt_period(),
2798 global_rt_runtime());
2799 }
2800 }
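
/*
 * Worked example (illustrative): with the default 95% RT bandwidth the
 * assignments above give, in fixed point,
 *
 *	bw_ratio ~= (1000000 / 950000) << RATIO_SHIFT	(~1.053)
 *	extra_bw ~= ( 950000 / 1000000) << BW_SHIFT	(~0.95)
 *
 * Both are consumed by the GRUB reclaiming code (grub_reclaim()) to
 * account for the share of CPU time that deadline tasks may reclaim.
 */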
2801
2802 void sched_dl_do_global(void)
2803 {
2804 u64 new_bw = -1;
2805 u64 gen = ++dl_generation;
2806 struct dl_bw *dl_b;
2807 int cpu;
2808 unsigned long flags;
2809
2810 if (global_rt_runtime() != RUNTIME_INF)
2811 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2812
2813 for_each_possible_cpu(cpu) {
2814 rcu_read_lock_sched();
2815
2816 if (dl_bw_visited(cpu, gen)) {
2817 rcu_read_unlock_sched();
2818 continue;
2819 }
2820
2821 dl_b = dl_bw_of(cpu);
2822
2823 raw_spin_lock_irqsave(&dl_b->lock, flags);
2824 dl_b->bw = new_bw;
2825 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2826
2827 rcu_read_unlock_sched();
2828 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2829 }
2830 }
2831
2832 /*
2833 * We must be sure that accepting a new task (or allowing changing the
2834 * parameters of an existing one) is consistent with the bandwidth
2835 * constraints. If yes, this function also accordingly updates the currently
2836 * allocated bandwidth to reflect the new situation.
2837 *
2838 * This function is called while holding p's rq->lock.
2839 */
2840 int sched_dl_overflow(struct task_struct *p, int policy,
2841 const struct sched_attr *attr)
2842 {
2843 u64 period = attr->sched_period ?: attr->sched_deadline;
2844 u64 runtime = attr->sched_runtime;
2845 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2846 int cpus, err = -1, cpu = task_cpu(p);
2847 struct dl_bw *dl_b = dl_bw_of(cpu);
2848 unsigned long cap;
2849
2850 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2851 return 0;
2852
2853 /* !deadline task may carry old deadline bandwidth */
2854 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2855 return 0;
2856
2857 /*
2858 * Whether a task enters, leaves, or stays -deadline but changes
2859 * its parameters, we may need to update the total allocated
2860 * bandwidth of the container accordingly.
2861 */
2862 raw_spin_lock(&dl_b->lock);
2863 cpus = dl_bw_cpus(cpu);
2864 cap = dl_bw_capacity(cpu);
2865
2866 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2867 !__dl_overflow(dl_b, cap, 0, new_bw)) {
2868 if (hrtimer_active(&p->dl.inactive_timer))
2869 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2870 __dl_add(dl_b, new_bw, cpus);
2871 err = 0;
2872 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2873 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
2874 /*
2875 * XXX this is slightly incorrect: when the task
2876 * utilization decreases, we should delay the total
2877 * utilization change until the task's 0-lag point.
2878 * But this would require setting the task's "inactive
2879 * timer" when the task is not inactive.
2880 */
2881 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2882 __dl_add(dl_b, new_bw, cpus);
2883 dl_change_utilization(p, new_bw);
2884 err = 0;
2885 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2886 /*
2887 * Do not decrease the total deadline utilization here,
2888 * switched_from_dl() will take care to do it at the correct
2889 * (0-lag) time.
2890 */
2891 err = 0;
2892 }
2893 raw_spin_unlock(&dl_b->lock);
2894
2895 return err;
2896 }
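
/*
 * Worked example (illustrative numbers): admitting a task with
 * sched_runtime = 10ms and sched_period = 100ms asks for
 * new_bw ~= 0.1 in BW_SHIFT fixed point. On a root domain of four
 * full-capacity CPUs with the default 95% limit, __dl_overflow()
 * accepts it as long as total_bw - old_bw + new_bw stays below
 * 4 * 0.95 = 3.8 in the same fixed point.
 */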
2897
2898 /*
2899 * This function initializes the sched_dl_entity of a newly becoming
2900 * SCHED_DEADLINE task.
2901 *
2902 * Only the static values are considered here, the actual runtime and the
2903 * absolute deadline will be properly calculated when the task is enqueued
2904 * for the first time with its new policy.
2905 */
2906 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2907 {
2908 struct sched_dl_entity *dl_se = &p->dl;
2909
2910 dl_se->dl_runtime = attr->sched_runtime;
2911 dl_se->dl_deadline = attr->sched_deadline;
2912 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2913 dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
2914 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2915 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2916 }
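
/*
 * Worked example (illustrative): for a constrained task with
 * sched_runtime = 5ms, sched_deadline = 20ms and sched_period = 100ms
 * the assignments above yield, in BW_SHIFT fixed point,
 *
 *	dl_bw      ~= 5/100 = 0.05	(bandwidth, used for admission)
 *	dl_density ~= 5/20  = 0.25	(density, used on wakeup to decide
 *					 whether the current runtime and
 *					 deadline can still be used)
 */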
2917
2918 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2919 {
2920 struct sched_dl_entity *dl_se = &p->dl;
2921
2922 attr->sched_priority = p->rt_priority;
2923 attr->sched_runtime = dl_se->dl_runtime;
2924 attr->sched_deadline = dl_se->dl_deadline;
2925 attr->sched_period = dl_se->dl_period;
2926 attr->sched_flags &= ~SCHED_DL_FLAGS;
2927 attr->sched_flags |= dl_se->flags;
2928 }
2929
2930 /*
2931 * This function validates the new parameters of a -deadline task.
2932 * We require the deadline to be non-zero and greater than or equal
2933 * to the runtime, and the period to be either zero or greater than
2934 * or equal to the deadline. Furthermore, we have to be sure that
2935 * user parameters are above the internal resolution of 1us (we
2936 * check sched_runtime only since it is always the smaller one) and
2937 * below 2^63 ns (we have to check both sched_deadline and
2938 * sched_period, as the latter can be zero).
2939 */
2940 bool __checkparam_dl(const struct sched_attr *attr)
2941 {
2942 u64 period, max, min;
2943
2944 /* special dl tasks don't actually use any parameter */
2945 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2946 return true;
2947
2948 /* deadline != 0 */
2949 if (attr->sched_deadline == 0)
2950 return false;
2951
2952 /*
2953 * Since we truncate DL_SCALE bits, make sure we're at least
2954 * that big.
2955 */
2956 if (attr->sched_runtime < (1ULL << DL_SCALE))
2957 return false;
2958
2959 /*
2960 * Since we use the MSB for wrap-around and sign issues, make
2961 * sure it's not set (mind that period can be equal to zero).
2962 */
2963 if (attr->sched_deadline & (1ULL << 63) ||
2964 attr->sched_period & (1ULL << 63))
2965 return false;
2966
2967 period = attr->sched_period;
2968 if (!period)
2969 period = attr->sched_deadline;
2970
2971 /* runtime <= deadline <= period (if period != 0) */
2972 if (period < attr->sched_deadline ||
2973 attr->sched_deadline < attr->sched_runtime)
2974 return false;
2975
2976 max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
2977 min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
2978
2979 if (period < min || period > max)
2980 return false;
2981
2982 return true;
2983 }
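
/*
 * Illustrative sketch (userspace, not part of this file): a parameter
 * set that passes the checks above could be installed roughly like
 * this; the raw syscall is used since glibc traditionally does not
 * wrap sched_setattr().
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	//  10 ms
 *		.sched_deadline	=  30 * 1000 * 1000,	//  30 ms
 *		.sched_period	= 100 * 1000 * 1000,	// 100 ms
 *	};
 *
 *	if (syscall(__NR_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 *
 * runtime <= deadline <= period holds, the runtime is well above the
 * 1us resolution, and the period sits inside the sysctl min/max range.
 */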
2984
2985 /*
2986 * This function clears the sched_dl_entity static params.
2987 */
2988 void __dl_clear_params(struct task_struct *p)
2989 {
2990 struct sched_dl_entity *dl_se = &p->dl;
2991
2992 dl_se->dl_runtime = 0;
2993 dl_se->dl_deadline = 0;
2994 dl_se->dl_period = 0;
2995 dl_se->flags = 0;
2996 dl_se->dl_bw = 0;
2997 dl_se->dl_density = 0;
2998
2999 dl_se->dl_throttled = 0;
3000 dl_se->dl_yielded = 0;
3001 dl_se->dl_non_contending = 0;
3002 dl_se->dl_overrun = 0;
3003
3004 #ifdef CONFIG_RT_MUTEXES
3005 dl_se->pi_se = dl_se;
3006 #endif
3007 }
3008
3009 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
3010 {
3011 struct sched_dl_entity *dl_se = &p->dl;
3012
3013 if (dl_se->dl_runtime != attr->sched_runtime ||
3014 dl_se->dl_deadline != attr->sched_deadline ||
3015 dl_se->dl_period != attr->sched_period ||
3016 dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
3017 return true;
3018
3019 return false;
3020 }
3021
3022 #ifdef CONFIG_SMP
3023 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
3024 const struct cpumask *trial)
3025 {
3026 unsigned long flags, cap;
3027 struct dl_bw *cur_dl_b;
3028 int ret = 1;
3029
3030 rcu_read_lock_sched();
3031 cur_dl_b = dl_bw_of(cpumask_any(cur));
3032 cap = __dl_bw_capacity(trial);
3033 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
3034 if (__dl_overflow(cur_dl_b, cap, 0, 0))
3035 ret = 0;
3036 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
3037 rcu_read_unlock_sched();
3038
3039 return ret;
3040 }
3041
3042 enum dl_bw_request {
3043 dl_bw_req_check_overflow = 0,
3044 dl_bw_req_alloc,
3045 dl_bw_req_free
3046 };
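
/*
 * dl_bw_req_check_overflow only tests whether @dl_bw additional
 * bandwidth (possibly 0, i.e. "does the current allocation still
 * fit?") would overflow the root domain of @cpu; dl_bw_req_alloc
 * additionally reserves it on success; dl_bw_req_free unconditionally
 * returns it.
 */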
3047
3048 static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
3049 {
3050 unsigned long flags;
3051 struct dl_bw *dl_b;
3052 bool overflow = 0;
3053
3054 rcu_read_lock_sched();
3055 dl_b = dl_bw_of(cpu);
3056 raw_spin_lock_irqsave(&dl_b->lock, flags);
3057
3058 if (req == dl_bw_req_free) {
3059 __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
3060 } else {
3061 unsigned long cap = dl_bw_capacity(cpu);
3062
3063 overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
3064
3065 if (req == dl_bw_req_alloc && !overflow) {
3066 /*
3067 * We reserve space in the destination
3068 * root_domain, as we can't fail after this point.
3069 * We will free resources in the source root_domain
3070 * later on (see set_cpus_allowed_dl()).
3071 */
3072 __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
3073 }
3074 }
3075
3076 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3077 rcu_read_unlock_sched();
3078
3079 return overflow ? -EBUSY : 0;
3080 }
3081
3082 int dl_bw_check_overflow(int cpu)
3083 {
3084 return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
3085 }
3086
3087 int dl_bw_alloc(int cpu, u64 dl_bw)
3088 {
3089 return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
3090 }
3091
3092 void dl_bw_free(int cpu, u64 dl_bw)
3093 {
3094 dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
3095 }
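
/*
 * Usage sketch (illustrative, variable names made up): the cpuset
 * attach path pairs the two helpers above along the lines of
 *
 *	ret = dl_bw_alloc(dest_cpu, p->dl.dl_bw);	// reserve bandwidth
 *	if (ret)
 *		return ret;				// -EBUSY, no room
 *	...
 *	dl_bw_free(dest_cpu, p->dl.dl_bw);		// undo if attach fails
 *
 * with the real callers living in the cpuset code.
 */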
3096 #endif
3097
3098 #ifdef CONFIG_SCHED_DEBUG
3099 void print_dl_stats(struct seq_file *m, int cpu)
3100 {
3101 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
3102 }
3103 #endif /* CONFIG_SCHED_DEBUG */
3104