1 /*
2 * Deadline Scheduling Class (SCHED_DEADLINE)
3 *
4 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
5 *
6 * Tasks that periodically execute their instances for less than their
7 * runtime won't miss any of their deadlines.
8 * Tasks that are not periodic or sporadic, or that try to execute more
9 * than their reserved bandwidth will be slowed down (and may potentially
10 * miss some of their deadlines), and won't affect any other task.
11 *
12 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
13 * Juri Lelli <juri.lelli@gmail.com>,
14 * Michael Trimarchi <michael@amarulasolutions.com>,
15 * Fabio Checconi <fchecconi@gmail.com>
16 */
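
/*
 * Illustrative sketch (not part of the original file): from userspace, a
 * task typically asks for its CBS reservation via sched_setattr(2), with
 * all times in nanoseconds, e.g. 10ms of runtime every 100ms period
 * (implicit deadline == period):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,
 *		.sched_deadline	= 100 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 */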
17 #include "sched.h"
18
19 #include <linux/slab.h>
20
21 #include "walt.h"
22
23 struct dl_bandwidth def_dl_bandwidth;
24
25 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
26 {
27 return container_of(dl_se, struct task_struct, dl);
28 }
29
30 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
31 {
32 return container_of(dl_rq, struct rq, dl);
33 }
34
35 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
36 {
37 struct task_struct *p = dl_task_of(dl_se);
38 struct rq *rq = task_rq(p);
39
40 return &rq->dl;
41 }
42
43 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
44 {
45 return !RB_EMPTY_NODE(&dl_se->rb_node);
46 }
47
48 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
49 {
50 struct sched_dl_entity *dl_se = &p->dl;
51
52 return dl_rq->rb_leftmost == &dl_se->rb_node;
53 }
54
55 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
56 {
57 raw_spin_lock_init(&dl_b->dl_runtime_lock);
58 dl_b->dl_period = period;
59 dl_b->dl_runtime = runtime;
60 }
61
62 void init_dl_bw(struct dl_bw *dl_b)
63 {
64 raw_spin_lock_init(&dl_b->lock);
65 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
66 if (global_rt_runtime() == RUNTIME_INF)
67 dl_b->bw = -1;
68 else
69 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
70 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
71 dl_b->total_bw = 0;
72 }
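
/*
 * Worked example (illustrative, assuming to_ratio() returns runtime/period
 * in 1 << 20 fixed point, as used elsewhere in the scheduler): with the
 * default rt limits of runtime = 950000000ns over period = 1000000000ns,
 * dl_b->bw = (950000000 << 20) / 1000000000 ~= 996147, i.e. 95% of a CPU
 * expressed as a fraction of 1 << 20.
 */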
73
74 void init_dl_rq(struct dl_rq *dl_rq)
75 {
76 dl_rq->rb_root = RB_ROOT;
77
78 #ifdef CONFIG_SMP
79 /* zero means no -deadline tasks */
80 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
81
82 dl_rq->dl_nr_migratory = 0;
83 dl_rq->overloaded = 0;
84 dl_rq->pushable_dl_tasks_root = RB_ROOT;
85 #else
86 init_dl_bw(&dl_rq->dl_bw);
87 #endif
88 }
89
90 #ifdef CONFIG_SMP
91
92 static inline int dl_overloaded(struct rq *rq)
93 {
94 return atomic_read(&rq->rd->dlo_count);
95 }
96
97 static inline void dl_set_overload(struct rq *rq)
98 {
99 if (!rq->online)
100 return;
101
102 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
103 /*
104 * Must be visible before the overload count is
105 * set (as in sched_rt.c).
106 *
107 * Matched by the barrier in pull_dl_task().
108 */
109 smp_wmb();
110 atomic_inc(&rq->rd->dlo_count);
111 }
112
113 static inline void dl_clear_overload(struct rq *rq)
114 {
115 if (!rq->online)
116 return;
117
118 atomic_dec(&rq->rd->dlo_count);
119 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
120 }
121
122 static void update_dl_migration(struct dl_rq *dl_rq)
123 {
124 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
125 if (!dl_rq->overloaded) {
126 dl_set_overload(rq_of_dl_rq(dl_rq));
127 dl_rq->overloaded = 1;
128 }
129 } else if (dl_rq->overloaded) {
130 dl_clear_overload(rq_of_dl_rq(dl_rq));
131 dl_rq->overloaded = 0;
132 }
133 }
134
135 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
136 {
137 struct task_struct *p = dl_task_of(dl_se);
138
139 if (tsk_nr_cpus_allowed(p) > 1)
140 dl_rq->dl_nr_migratory++;
141
142 update_dl_migration(dl_rq);
143 }
144
145 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
146 {
147 struct task_struct *p = dl_task_of(dl_se);
148
149 if (tsk_nr_cpus_allowed(p) > 1)
150 dl_rq->dl_nr_migratory--;
151
152 update_dl_migration(dl_rq);
153 }
154
155 /*
156 * The list of pushable -deadline tasks is not a plist, like in
157 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
158 */
159 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
160 {
161 struct dl_rq *dl_rq = &rq->dl;
162 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
163 struct rb_node *parent = NULL;
164 struct task_struct *entry;
165 int leftmost = 1;
166
167 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
168
169 while (*link) {
170 parent = *link;
171 entry = rb_entry(parent, struct task_struct,
172 pushable_dl_tasks);
173 if (dl_entity_preempt(&p->dl, &entry->dl))
174 link = &parent->rb_left;
175 else {
176 link = &parent->rb_right;
177 leftmost = 0;
178 }
179 }
180
181 if (leftmost) {
182 dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
183 dl_rq->earliest_dl.next = p->dl.deadline;
184 }
185
186 rb_link_node(&p->pushable_dl_tasks, parent, link);
187 rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
188 }
189
190 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
191 {
192 struct dl_rq *dl_rq = &rq->dl;
193
194 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
195 return;
196
197 if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
198 struct rb_node *next_node;
199
200 next_node = rb_next(&p->pushable_dl_tasks);
201 dl_rq->pushable_dl_tasks_leftmost = next_node;
202 if (next_node) {
203 dl_rq->earliest_dl.next = rb_entry(next_node,
204 struct task_struct, pushable_dl_tasks)->dl.deadline;
205 }
206 }
207
208 rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
209 RB_CLEAR_NODE(&p->pushable_dl_tasks);
210 }
211
212 static inline int has_pushable_dl_tasks(struct rq *rq)
213 {
214 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
215 }
216
217 static int push_dl_task(struct rq *rq);
218
219 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
220 {
221 return dl_task(prev);
222 }
223
224 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
225 static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
226
227 static void push_dl_tasks(struct rq *);
228 static void pull_dl_task(struct rq *);
229
230 static inline void queue_push_tasks(struct rq *rq)
231 {
232 if (!has_pushable_dl_tasks(rq))
233 return;
234
235 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
236 }
237
238 static inline void queue_pull_task(struct rq *rq)
239 {
240 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
241 }
242
243 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
244
245 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
246 {
247 struct rq *later_rq = NULL;
248
249 later_rq = find_lock_later_rq(p, rq);
250 if (!later_rq) {
251 int cpu;
252
253 /*
254 * If we cannot preempt any rq, fall back to pick any
255 * online cpu.
256 */
257 cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
258 if (cpu >= nr_cpu_ids) {
259 /*
260 * Failed to find any suitable cpu.
261 * The task will never come back!
262 */
263 BUG_ON(dl_bandwidth_enabled());
264
265 /*
266 * If admission control is disabled we
267 * try a little harder to let the task
268 * run.
269 */
270 cpu = cpumask_any(cpu_active_mask);
271 }
272 later_rq = cpu_rq(cpu);
273 double_lock_balance(rq, later_rq);
274 }
275
276 set_task_cpu(p, later_rq->cpu);
277 double_unlock_balance(later_rq, rq);
278
279 return later_rq;
280 }
281
282 #else
283
284 static inline
285 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
286 {
287 }
288
289 static inline
290 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
291 {
292 }
293
294 static inline
295 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
296 {
297 }
298
299 static inline
300 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
301 {
302 }
303
304 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
305 {
306 return false;
307 }
308
309 static inline void pull_dl_task(struct rq *rq)
310 {
311 }
312
313 static inline void queue_push_tasks(struct rq *rq)
314 {
315 }
316
317 static inline void queue_pull_task(struct rq *rq)
318 {
319 }
320 #endif /* CONFIG_SMP */
321
322 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
323 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
324 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
325 int flags);
326
327 /*
328 * We are being explicitly informed that a new instance is starting,
329 * and this means that:
330 * - the absolute deadline of the entity has to be placed at
331 * current time + relative deadline;
332 * - the runtime of the entity has to be set to the maximum value.
333 *
334 * The ability to specify such an event is useful whenever a -deadline
335 * entity wants to (try to!) synchronize its behaviour with the scheduler's
336 * one, and to (try to!) reconcile itself with its own scheduling
337 * parameters.
338 */
339 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
340 {
341 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
342 struct rq *rq = rq_of_dl_rq(dl_rq);
343
344 WARN_ON(dl_se->dl_boosted);
345 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
346
347 /*
348 * We are racing with the deadline timer. So, do nothing because
349 * the deadline timer handler will take care of properly recharging
350 * the runtime and postponing the deadline
351 */
352 if (dl_se->dl_throttled)
353 return;
354
355 /*
356 * We use the regular wall clock time to set deadlines in the
357 * future; in fact, we must consider execution overheads (time
358 * spent on hardirq context, etc.).
359 */
360 dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
361 dl_se->runtime = dl_se->dl_runtime;
362 }
363
364 /*
365 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
366 * possibility of an entity lasting more than what it declared, and thus
367 * exhausting its runtime.
368 *
369 * Here we are interested in making runtime overrun possible, but we do
370 * not want an entity which is misbehaving to affect the scheduling of all
371 * other entities.
372 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
373 * is used, in order to confine each entity within its own bandwidth.
374 *
375 * This function deals exactly with that, and ensures that when the runtime
376 * of an entity is replenished, its deadline is also postponed. That ensures
377 * the overrunning entity can't interfere with other entities in the system and
378 * can't make them miss their deadlines. Such overruns typically happen when
379 * an entity voluntarily tries to exceed its runtime, or when it simply
380 * underestimated its runtime during sched_setattr().
381 */
382 static void replenish_dl_entity(struct sched_dl_entity *dl_se,
383 struct sched_dl_entity *pi_se)
384 {
385 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
386 struct rq *rq = rq_of_dl_rq(dl_rq);
387
388 BUG_ON(pi_se->dl_runtime <= 0);
389
390 /*
391 * This could be the case for a !-dl task that is boosted.
392 * Just go with full inherited parameters.
393 */
394 if (dl_se->dl_deadline == 0) {
395 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
396 dl_se->runtime = pi_se->dl_runtime;
397 }
398
399 if (dl_se->dl_yielded && dl_se->runtime > 0)
400 dl_se->runtime = 0;
401
402 /*
403 * We keep moving the deadline away until we get some
404 * available runtime for the entity. This ensures correct
405 * handling of situations where the runtime overrun is
406 * arbitrarily large.
407 */
408 while (dl_se->runtime <= 0) {
409 dl_se->deadline += pi_se->dl_period;
410 dl_se->runtime += pi_se->dl_runtime;
411 }
412
413 /*
414 * At this point, the deadline really should be "in
415 * the future" with respect to rq->clock. If it's
416 * not, we are, for some reason, lagging too much!
417 * Anyway, after having warned userspace about that,
418 * we still try to keep things running by
419 * resetting the deadline and the budget of the
420 * entity.
421 */
422 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
423 printk_deferred_once("sched: DL replenish lagged too much\n");
424 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
425 dl_se->runtime = pi_se->dl_runtime;
426 }
427
428 if (dl_se->dl_yielded)
429 dl_se->dl_yielded = 0;
430 if (dl_se->dl_throttled)
431 dl_se->dl_throttled = 0;
432 }
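
/*
 * Illustrative example (not from the original source): suppose
 * dl_runtime = 10ms and dl_period = 100ms, and the entity is being
 * replenished with runtime = -7ms after an overrun. One pass of the
 * loop above gives runtime = 3ms and pushes the absolute deadline
 * 100ms further into the future, so the overrun is paid back by
 * postponing the entity rather than by stealing bandwidth from others.
 */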
433
434 /*
435 * Here we check if --at time t-- an entity (which is probably being
436 * [re]activated or, in general, enqueued) can use its remaining runtime
437 * and its current deadline _without_ exceeding the bandwidth it is
438 * assigned (function returns true if it can't). We are in fact applying
439 * one of the CBS rules: when a task wakes up, if the residual runtime
440 * over residual deadline fits within the allocated bandwidth, then we
441 * can keep the current (absolute) deadline and residual budget without
442 * disrupting the schedulability of the system. Otherwise, we should
443 * refill the runtime and set the deadline a period in the future,
444 * because keeping the current (absolute) deadline of the task would
445 * result in breaking guarantees promised to other tasks (refer to
446 * Documentation/scheduler/sched-deadline.txt for more information).
447 *
448 * This function returns true if:
449 *
450 * runtime / (deadline - t) > dl_runtime / dl_deadline ,
451 *
452 * IOW we can't recycle current parameters.
453 *
454 * Notice that the bandwidth check is done against the deadline. For
455 * a task with deadline equal to period this is the same as using
456 * dl_period instead of dl_deadline in the equation above.
457 */
458 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
459 struct sched_dl_entity *pi_se, u64 t)
460 {
461 u64 left, right;
462
463 /*
464 * left and right are the two sides of the equation above,
465 * after a bit of shuffling to use multiplications instead
466 * of divisions.
467 *
468 * Note that none of the time values involved in the two
469 * multiplications are absolute: dl_deadline and dl_runtime
470 * are the relative deadline and the maximum runtime of each
471 * instance, runtime is the runtime left for the last instance
472 * and (deadline - t), since t is rq->clock, is the time left
473 * to the (absolute) deadline. Even if overflowing the u64 type
474 * is very unlikely to occur in either case, here we scale down
475 * as we want to avoid that risk altogether. Scaling down by 10
476 * bits means that we reduce granularity to 1us. We are fine with it,
477 * since this is only a true/false check and, anyway, thinking
478 * of anything below microseconds resolution is actually fiction
479 * (but still we want to give the user that illusion >;).
480 */
481 left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
482 right = ((dl_se->deadline - t) >> DL_SCALE) *
483 (pi_se->dl_runtime >> DL_SCALE);
484
485 return dl_time_before(right, left);
486 }
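
/*
 * Worked example (illustrative): dl_runtime = 10ms, dl_deadline = 100ms,
 * and at wakeup the entity still has runtime = 6ms with 30ms left to its
 * absolute deadline. Then 6/30 = 0.2 > 10/100 = 0.1; in the
 * cross-multiplied form above, left = 100 * 6 > right = 30 * 10, so the
 * function returns true and the parameters are refreshed rather than
 * reused (all values scaled down by DL_SCALE in the real check).
 */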
487
488 /*
489 * Revised wakeup rule [1]: For self-suspending tasks, rather than
490 * re-initializing the task's runtime and deadline, the revised wakeup
491 * rule adjusts the task's runtime so that the task does not overrun its
492 * density.
493 *
494 * Reasoning: a task may overrun the density if:
495 * runtime / (deadline - t) > dl_runtime / dl_deadline
496 *
497 * Therefore, runtime can be adjusted to:
498 * runtime = (dl_runtime / dl_deadline) * (deadline - t)
499 *
500 * In such a way that runtime will be equal to the maximum density
501 * the task can use without breaking any rule.
502 *
503 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
504 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
505 */
506 static void
507 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
508 {
509 u64 laxity = dl_se->deadline - rq_clock(rq);
510
511 /*
512 * If the task has deadline < period, and the deadline is in the past,
513 * it should already be throttled before this check.
514 *
515 * See update_dl_entity() comments for further details.
516 */
517 WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
518
519 dl_se->runtime = (dl_se->dl_density * laxity) >> 20;
520 }
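
/*
 * Worked example (illustrative, assuming dl_density is
 * (dl_runtime << 20) / dl_deadline as set up at parameter time): with
 * dl_runtime = 10ms, dl_deadline = 100ms and 30ms of laxity left, the
 * runtime is trimmed to 0.1 * 30ms = 3ms, so the task keeps its current
 * deadline but can no longer exceed its declared density.
 */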
521
522 /*
523 * Regarding the deadline, a task with implicit deadline has a relative
524 * deadline == relative period. A task with constrained deadline has a
525 * relative deadline <= relative period.
526 *
527 * We support constrained deadline tasks. However, there are some restrictions
528 * applied only for tasks which do not have an implicit deadline. See
529 * update_dl_entity() to know more about such restrictions.
530 *
531 * The dl_is_implicit() returns true if the task has an implicit deadline.
532 */
533 static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
534 {
535 return dl_se->dl_deadline == dl_se->dl_period;
536 }
537
538 /*
539 * When a deadline entity is placed in the runqueue, its runtime and deadline
540 * might need to be updated. This is done by a CBS wake up rule. There are two
541 * different rules: 1) the original CBS; and 2) the Revisited CBS.
542 *
543 * When the task is starting a new period, the Original CBS is used. In this
544 * case, the runtime is replenished and a new absolute deadline is set.
545 *
546 * When a task is queued before the beginning of the next period, using the
547 * remaining runtime and deadline could make the entity overflow, see
548 * dl_entity_overflow() to find more about runtime overflow. When such a case
549 * is detected, the runtime and deadline need to be updated.
550 *
551 * If the task has an implicit deadline, i.e., deadline == period, the Original
552 * CBS is applied. The runtime is replenished and a new absolute deadline is
553 * set, as in the previous cases.
554 *
555 * However, the Original CBS does not work properly for tasks with
556 * deadline < period, which are said to have a constrained deadline. By
557 * applying the Original CBS, a constrained deadline task would be able to run
558 * runtime/deadline in a period. With deadline < period, the task would
559 * overrun the runtime/period allowed bandwidth, breaking the admission test.
560 *
561 * In order to prevent this misbehaviour, the Revisited CBS is used for
562 * constrained deadline tasks when a runtime overflow is detected. In the
563 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
564 * the remaining runtime of the task is reduced to avoid runtime overflow.
565 * Please refer to the comments of update_dl_revised_wakeup() to find
566 * more about the Revised CBS rule.
567 */
568 static void update_dl_entity(struct sched_dl_entity *dl_se,
569 struct sched_dl_entity *pi_se)
570 {
571 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
572 struct rq *rq = rq_of_dl_rq(dl_rq);
573
574 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
575 dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
576
577 if (unlikely(!dl_is_implicit(dl_se) &&
578 !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
579 !dl_se->dl_boosted)){
580 update_dl_revised_wakeup(dl_se, rq);
581 return;
582 }
583
584 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
585 dl_se->runtime = pi_se->dl_runtime;
586 }
587 }
588
589 static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
590 {
591 return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
592 }
593
594 /*
595 * If the entity depleted all its runtime, and if we want it to sleep
596 * while waiting for some new execution time to become available, we
597 * set the bandwidth replenishment timer to the replenishment instant
598 * and try to activate it.
599 *
600 * Notice that it is important for the caller to know if the timer
601 * actually started or not (i.e., the replenishment instant is in
602 * the future or in the past).
603 */
604 static int start_dl_timer(struct task_struct *p)
605 {
606 struct sched_dl_entity *dl_se = &p->dl;
607 struct hrtimer *timer = &dl_se->dl_timer;
608 struct rq *rq = task_rq(p);
609 ktime_t now, act;
610 s64 delta;
611
612 lockdep_assert_held(&rq->lock);
613
614 /*
615 * We want the timer to fire at the deadline, but considering
616 * that it is actually coming from rq->clock and not from
617 * hrtimer's time base reading.
618 */
619 act = ns_to_ktime(dl_next_period(dl_se));
620 now = hrtimer_cb_get_time(timer);
621 delta = ktime_to_ns(now) - rq_clock(rq);
622 act = ktime_add_ns(act, delta);
623
624 /*
625 * If the expiry time already passed, e.g., because the value
626 * chosen as the deadline is too small, don't even try to
627 * start the timer in the past!
628 */
629 if (ktime_us_delta(act, now) < 0)
630 return 0;
631
632 /*
633 * !enqueued will guarantee another callback; even if one is already in
634 * progress. This ensures a balanced {get,put}_task_struct().
635 *
636 * The race against __run_timer() clearing the enqueued state is
637 * harmless because we're holding task_rq()->lock, therefore the timer
638 * expiring after we've done the check will wait on its task_rq_lock()
639 * and observe our state.
640 */
641 if (!hrtimer_is_queued(timer)) {
642 get_task_struct(p);
643 hrtimer_start(timer, act, HRTIMER_MODE_ABS);
644 }
645
646 return 1;
647 }
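
/*
 * Illustrative note: the replenishment instant is known in rq_clock()
 * time while the hrtimer fires on its own base, so the expiry above is
 * translated as
 *
 *	act = dl_next_period(dl_se) + (hrtimer_now - rq_clock(rq))
 *
 * e.g. if the hrtimer base currently reads 2us more than rq_clock(),
 * the timer is armed 2us later in hrtimer time, which corresponds to
 * the same instant as seen by the scheduler clock.
 */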
648
649 /*
650 * This is the bandwidth enforcement timer callback. If here, we know
651 * a task is not on its dl_rq, since the fact that the timer was running
652 * means the task is throttled and needs a runtime replenishment.
653 *
654 * However, what we actually do depends on the fact the task is active,
655 * (it is on its rq) or has been removed from there by a call to
656 * dequeue_task_dl(). In the former case we must issue the runtime
657 * replenishment and add the task back to the dl_rq; in the latter, we just
658 * do nothing but clearing dl_throttled, so that runtime and deadline
659 * updating (and the queueing back to dl_rq) will be done by the
660 * next call to enqueue_task_dl().
661 */
662 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
663 {
664 struct sched_dl_entity *dl_se = container_of(timer,
665 struct sched_dl_entity,
666 dl_timer);
667 struct task_struct *p = dl_task_of(dl_se);
668 struct rq_flags rf;
669 struct rq *rq;
670
671 rq = task_rq_lock(p, &rf);
672
673 /*
674 * The task might have changed its scheduling policy to something
675 * different than SCHED_DEADLINE (through switched_from_dl()).
676 */
677 if (!dl_task(p)) {
678 __dl_clear_params(p);
679 goto unlock;
680 }
681
682 /*
683 * The task might have been boosted by someone else and might be in the
684 * boosting/deboosting path, in which case it's not throttled.
685 */
686 if (dl_se->dl_boosted)
687 goto unlock;
688
689 /*
690 * Spurious timer due to start_dl_timer() race; or we already received
691 * a replenishment from rt_mutex_setprio().
692 */
693 if (!dl_se->dl_throttled)
694 goto unlock;
695
696 sched_clock_tick();
697 update_rq_clock(rq);
698
699 /*
700 * If the throttle happened during sched-out; like:
701 *
702 * schedule()
703 * deactivate_task()
704 * dequeue_task_dl()
705 * update_curr_dl()
706 * start_dl_timer()
707 * __dequeue_task_dl()
708 * prev->on_rq = 0;
709 *
710 * We can be both throttled and !queued. Replenish the counter
711 * but do not enqueue -- wait for our wakeup to do that.
712 */
713 if (!task_on_rq_queued(p)) {
714 replenish_dl_entity(dl_se, dl_se);
715 goto unlock;
716 }
717
718 #ifdef CONFIG_SMP
719 if (unlikely(!rq->online)) {
720 /*
721 * If the runqueue is no longer available, migrate the
722 * task elsewhere. This necessarily changes rq.
723 */
724 lockdep_unpin_lock(&rq->lock, rf.cookie);
725 rq = dl_task_offline_migration(rq, p);
726 rf.cookie = lockdep_pin_lock(&rq->lock);
727 update_rq_clock(rq);
728
729 /*
730 * Now that the task has been migrated to the new RQ and we
731 * have that locked, proceed as normal and enqueue the task
732 * there.
733 */
734 }
735 #endif
736
737 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
738 if (dl_task(rq->curr))
739 check_preempt_curr_dl(rq, p, 0);
740 else
741 resched_curr(rq);
742
743 #ifdef CONFIG_SMP
744 /*
745 * Queueing this task back might have overloaded rq, check if we need
746 * to kick someone away.
747 */
748 if (has_pushable_dl_tasks(rq)) {
749 /*
750 * Nothing relies on rq->lock after this, so it's safe to drop
751 * rq->lock.
752 */
753 lockdep_unpin_lock(&rq->lock, rf.cookie);
754 push_dl_task(rq);
755 lockdep_repin_lock(&rq->lock, rf.cookie);
756 }
757 #endif
758
759 unlock:
760 task_rq_unlock(rq, p, &rf);
761
762 /*
763 * This can free the task_struct, including this hrtimer, do not touch
764 * anything related to that after this.
765 */
766 put_task_struct(p);
767
768 return HRTIMER_NORESTART;
769 }
770
771 void init_dl_task_timer(struct sched_dl_entity *dl_se)
772 {
773 struct hrtimer *timer = &dl_se->dl_timer;
774
775 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
776 timer->function = dl_task_timer;
777 }
778
779 /*
780 * During the activation, CBS checks if it can reuse the current task's
781 * runtime and period. If the deadline of the task is in the past, CBS
782 * cannot use the runtime, and so it replenishes the task. This rule
783 * works fine for implicit deadline tasks (deadline == period), and the
784 * CBS was designed for implicit deadline tasks. However, a task with
785 * constrained deadline (deadline < period) might be awakened after the
786 * deadline, but before the next period. In this case, replenishing the
787 * task would allow it to run for runtime / deadline. As in this case
788 * deadline < period, CBS enables a task to run for more than the
789 * runtime / period. In a very loaded system, this can cause a domino
790 * effect, making other tasks miss their deadlines.
791 *
792 * To avoid this problem, in the activation of a constrained deadline
793 * task after the deadline but before the next period, throttle the
794 * task and set the replenishing timer to the beginning of the next period,
795 * unless it is boosted.
796 */
797 static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
798 {
799 struct task_struct *p = dl_task_of(dl_se);
800 struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
801
802 if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
803 dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
804 if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
805 return;
806 dl_se->dl_throttled = 1;
807 if (dl_se->runtime > 0)
808 dl_se->runtime = 0;
809 }
810 }
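
/*
 * Illustrative example (not from the original source): a constrained
 * task with runtime = 5ms, deadline = 50ms, period = 100ms that wakes
 * up 60ms into its period has missed its deadline but its next period
 * has not started yet. Replenishing it here would let it consume up to
 * 5ms/50ms (10%) instead of the admitted 5ms/100ms (5%), so it is
 * throttled and the timer is armed for dl_next_period(), i.e. 40ms later.
 */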
811
812 static
813 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
814 {
815 return (dl_se->runtime <= 0);
816 }
817
818 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
819
820 /*
821 * Update the current task's runtime statistics (provided it is still
822 * a -deadline task and has not been removed from the dl_rq).
823 */
824 static void update_curr_dl(struct rq *rq)
825 {
826 struct task_struct *curr = rq->curr;
827 struct sched_dl_entity *dl_se = &curr->dl;
828 u64 delta_exec;
829
830 if (!dl_task(curr) || !on_dl_rq(dl_se))
831 return;
832
833 /*
834 * Consumed budget is computed considering the time as
835 * observed by schedulable tasks (excluding time spent
836 * in hardirq context, etc.). Deadlines are instead
837 * computed using hard walltime. This seems to be the more
838 * natural solution, but the full ramifications of this
839 * approach need further study.
840 */
841 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
842 if (unlikely((s64)delta_exec <= 0)) {
843 if (unlikely(dl_se->dl_yielded))
844 goto throttle;
845 return;
846 }
847
848 /* kick cpufreq (see the comment in kernel/sched/sched.h). */
849 cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_DL);
850
851 schedstat_set(curr->se.statistics.exec_max,
852 max(curr->se.statistics.exec_max, delta_exec));
853
854 curr->se.sum_exec_runtime += delta_exec;
855 account_group_exec_runtime(curr, delta_exec);
856
857 curr->se.exec_start = rq_clock_task(rq);
858 cpuacct_charge(curr, delta_exec);
859
860 sched_rt_avg_update(rq, delta_exec);
861
862 dl_se->runtime -= delta_exec;
863
864 throttle:
865 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
866 dl_se->dl_throttled = 1;
867 __dequeue_task_dl(rq, curr, 0);
868 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
869 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
870
871 if (!is_leftmost(curr, &rq->dl))
872 resched_curr(rq);
873 }
874
875 /*
876 * Because -- for now -- we share the rt bandwidth, we need to
877 * account our runtime there too, otherwise actual rt tasks
878 * would be able to exceed the shared quota.
879 *
880 * Account to the root rt group for now.
881 *
882 * The solution we're working towards is having the RT groups scheduled
883 * using deadline servers -- however there's a few nasties to figure
884 * out before that can happen.
885 */
886 if (rt_bandwidth_enabled()) {
887 struct rt_rq *rt_rq = &rq->rt;
888
889 raw_spin_lock(&rt_rq->rt_runtime_lock);
890 /*
891 * We'll let actual RT tasks worry about the overflow here, we
892 * have our own CBS to keep us inline; only account when RT
893 * bandwidth is relevant.
894 */
895 if (sched_rt_bandwidth_account(rt_rq))
896 rt_rq->rt_time += delta_exec;
897 raw_spin_unlock(&rt_rq->rt_runtime_lock);
898 }
899 }
900
901 #ifdef CONFIG_SMP
902
903 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
904 {
905 struct rq *rq = rq_of_dl_rq(dl_rq);
906
907 if (dl_rq->earliest_dl.curr == 0 ||
908 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
909 dl_rq->earliest_dl.curr = deadline;
910 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
911 }
912 }
913
914 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
915 {
916 struct rq *rq = rq_of_dl_rq(dl_rq);
917
918 /*
919 * Since we may have removed our earliest (and/or next earliest)
920 * task we must recompute them.
921 */
922 if (!dl_rq->dl_nr_running) {
923 dl_rq->earliest_dl.curr = 0;
924 dl_rq->earliest_dl.next = 0;
925 cpudl_clear(&rq->rd->cpudl, rq->cpu);
926 } else {
927 struct rb_node *leftmost = dl_rq->rb_leftmost;
928 struct sched_dl_entity *entry;
929
930 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
931 dl_rq->earliest_dl.curr = entry->deadline;
932 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
933 }
934 }
935
936 #else
937
938 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
939 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
940
941 #endif /* CONFIG_SMP */
942
943 static inline
944 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
945 {
946 int prio = dl_task_of(dl_se)->prio;
947 u64 deadline = dl_se->deadline;
948
949 WARN_ON(!dl_prio(prio));
950 dl_rq->dl_nr_running++;
951 add_nr_running(rq_of_dl_rq(dl_rq), 1);
952 walt_inc_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
953
954 inc_dl_deadline(dl_rq, deadline);
955 inc_dl_migration(dl_se, dl_rq);
956 }
957
958 static inline
959 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
960 {
961 int prio = dl_task_of(dl_se)->prio;
962
963 WARN_ON(!dl_prio(prio));
964 WARN_ON(!dl_rq->dl_nr_running);
965 dl_rq->dl_nr_running--;
966 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
967 walt_dec_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
968
969 dec_dl_deadline(dl_rq, dl_se->deadline);
970 dec_dl_migration(dl_se, dl_rq);
971 }
972
973 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
974 {
975 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
976 struct rb_node **link = &dl_rq->rb_root.rb_node;
977 struct rb_node *parent = NULL;
978 struct sched_dl_entity *entry;
979 int leftmost = 1;
980
981 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
982
983 while (*link) {
984 parent = *link;
985 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
986 if (dl_time_before(dl_se->deadline, entry->deadline))
987 link = &parent->rb_left;
988 else {
989 link = &parent->rb_right;
990 leftmost = 0;
991 }
992 }
993
994 if (leftmost)
995 dl_rq->rb_leftmost = &dl_se->rb_node;
996
997 rb_link_node(&dl_se->rb_node, parent, link);
998 rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
999
1000 inc_dl_tasks(dl_se, dl_rq);
1001 }
1002
1003 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1004 {
1005 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1006
1007 if (RB_EMPTY_NODE(&dl_se->rb_node))
1008 return;
1009
1010 if (dl_rq->rb_leftmost == &dl_se->rb_node) {
1011 struct rb_node *next_node;
1012
1013 next_node = rb_next(&dl_se->rb_node);
1014 dl_rq->rb_leftmost = next_node;
1015 }
1016
1017 rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
1018 RB_CLEAR_NODE(&dl_se->rb_node);
1019
1020 dec_dl_tasks(dl_se, dl_rq);
1021 }
1022
1023 static void
1024 enqueue_dl_entity(struct sched_dl_entity *dl_se,
1025 struct sched_dl_entity *pi_se, int flags)
1026 {
1027 BUG_ON(on_dl_rq(dl_se));
1028
1029 /*
1030 * If this is a wakeup or a new instance, the scheduling
1031 * parameters of the task might need updating. Otherwise,
1032 * we want a replenishment of its runtime.
1033 */
1034 if (flags & ENQUEUE_WAKEUP)
1035 update_dl_entity(dl_se, pi_se);
1036 else if (flags & ENQUEUE_REPLENISH)
1037 replenish_dl_entity(dl_se, pi_se);
1038
1039 __enqueue_dl_entity(dl_se);
1040 }
1041
1042 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1043 {
1044 __dequeue_dl_entity(dl_se);
1045 }
1046
1047 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1048 {
1049 struct task_struct *pi_task = rt_mutex_get_top_task(p);
1050 struct sched_dl_entity *pi_se = &p->dl;
1051
1052 /*
1053 * Use the scheduling parameters of the top pi-waiter
1054 * task if we have one and its (absolute) deadline is
1055 * smaller than ours... otherwise we keep our runtime and
1056 * deadline.
1057 */
1058 if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
1059 pi_se = &pi_task->dl;
1060 } else if (!dl_prio(p->normal_prio)) {
1061 /*
1062 * Special case in which we have a !SCHED_DEADLINE task
1063 * that is going to be deboosted, but exceeds its
1064 * runtime while doing so. No point in replenishing
1065 * it, as it's going to return back to its original
1066 * scheduling class after this.
1067 */
1068 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
1069 return;
1070 }
1071
1072 /*
1073 * Check if a constrained deadline task was activated
1074 * after the deadline but before the next period.
1075 * If that is the case, the task will be throttled and
1076 * the replenishment timer will be set to the next period.
1077 */
1078 if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1079 dl_check_constrained_dl(&p->dl);
1080
1081 /*
1082 * If p is throttled, we do nothing. In fact, if it exhausted
1083 * its budget it needs a replenishment and, since it now is on
1084 * its rq, the bandwidth timer callback (which clearly has not
1085 * run yet) will take care of this.
1086 */
1087 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
1088 return;
1089
1090 enqueue_dl_entity(&p->dl, pi_se, flags);
1091
1092 if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
1093 enqueue_pushable_dl_task(rq, p);
1094 }
1095
1096 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1097 {
1098 dequeue_dl_entity(&p->dl);
1099 dequeue_pushable_dl_task(rq, p);
1100 }
1101
1102 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1103 {
1104 update_curr_dl(rq);
1105 __dequeue_task_dl(rq, p, flags);
1106 }
1107
1108 /*
1109 * Yield task semantic for -deadline tasks is:
1110 *
1111 * get off from the CPU until our next instance, with
1112 * a new runtime. This is of little use now, since we
1113 * don't have a bandwidth reclaiming mechanism. Anyway,
1114 * bandwidth reclaiming is planned for the future, and
1115 * yield_task_dl will indicate that some spare budget
1116 * is available for other task instances to use.
1117 */
1118 static void yield_task_dl(struct rq *rq)
1119 {
1120 /*
1121 * We make the task go to sleep until its current deadline by
1122 * forcing its runtime to zero. This way, update_curr_dl() stops
1123 * it and the bandwidth timer will wake it up and will give it
1124 * new scheduling parameters (thanks to dl_yielded=1).
1125 */
1126 rq->curr->dl.dl_yielded = 1;
1127
1128 update_rq_clock(rq);
1129 update_curr_dl(rq);
1130 /*
1131 * Tell update_rq_clock() that we've just updated,
1132 * so we don't do microscopic update in schedule()
1133 * and double the fastpath cost.
1134 */
1135 rq_clock_skip_update(rq, true);
1136 }
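
/*
 * Illustrative note: from userspace this is reached via sched_yield(2);
 * a SCHED_DEADLINE task calling it gives up whatever budget is left in
 * the current instance and stays throttled until its next replenishment,
 * instead of merely dropping to the back of a runqueue as for other
 * policies.
 */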
1137
1138 #ifdef CONFIG_SMP
1139
1140 static int find_later_rq(struct task_struct *task);
1141
1142 static int
1143 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1144 {
1145 struct task_struct *curr;
1146 struct rq *rq;
1147
1148 if (sd_flag != SD_BALANCE_WAKE)
1149 goto out;
1150
1151 rq = cpu_rq(cpu);
1152
1153 rcu_read_lock();
1154 curr = READ_ONCE(rq->curr); /* unlocked access */
1155
1156 /*
1157 * If we are dealing with a -deadline task, we must
1158 * decide where to wake it up.
1159 * If it has a later deadline and the current task
1160 * on this rq can't move (provided the waking task
1161 * can!) we prefer to send it somewhere else. On the
1162 * other hand, if it has a shorter deadline, we
1163 * try to make it stay here, it might be important.
1164 */
1165 if (unlikely(dl_task(curr)) &&
1166 (tsk_nr_cpus_allowed(curr) < 2 ||
1167 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1168 (tsk_nr_cpus_allowed(p) > 1)) {
1169 int target = find_later_rq(p);
1170
1171 if (target != -1 &&
1172 (dl_time_before(p->dl.deadline,
1173 cpu_rq(target)->dl.earliest_dl.curr) ||
1174 (cpu_rq(target)->dl.dl_nr_running == 0)))
1175 cpu = target;
1176 }
1177 rcu_read_unlock();
1178
1179 out:
1180 return cpu;
1181 }
1182
1183 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1184 {
1185 /*
1186 * Current can't be migrated, useless to reschedule,
1187 * let's hope p can move out.
1188 */
1189 if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
1190 cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
1191 return;
1192
1193 /*
1194 * p is migratable, so let's not schedule it and
1195 * see if it is pushed or pulled somewhere else.
1196 */
1197 if (tsk_nr_cpus_allowed(p) != 1 &&
1198 cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
1199 return;
1200
1201 resched_curr(rq);
1202 }
1203
1204 #endif /* CONFIG_SMP */
1205
1206 /*
1207 * Only called when both the current and waking task are -deadline
1208 * tasks.
1209 */
1210 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1211 int flags)
1212 {
1213 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1214 resched_curr(rq);
1215 return;
1216 }
1217
1218 #ifdef CONFIG_SMP
1219 /*
1220 * In the unlikely case current and p have the same deadline
1221 * let us try to decide what's the best thing to do...
1222 */
1223 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1224 !test_tsk_need_resched(rq->curr))
1225 check_preempt_equal_dl(rq, p);
1226 #endif /* CONFIG_SMP */
1227 }
1228
1229 #ifdef CONFIG_SCHED_HRTICK
1230 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1231 {
1232 hrtick_start(rq, p->dl.runtime);
1233 }
1234 #else /* !CONFIG_SCHED_HRTICK */
1235 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1236 {
1237 }
1238 #endif
1239
1240 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1241 struct dl_rq *dl_rq)
1242 {
1243 struct rb_node *left = dl_rq->rb_leftmost;
1244
1245 if (!left)
1246 return NULL;
1247
1248 return rb_entry(left, struct sched_dl_entity, rb_node);
1249 }
1250
1251 struct task_struct *
1252 pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
1253 {
1254 struct sched_dl_entity *dl_se;
1255 struct task_struct *p;
1256 struct dl_rq *dl_rq;
1257
1258 dl_rq = &rq->dl;
1259
1260 if (need_pull_dl_task(rq, prev)) {
1261 /*
1262 * This is OK, because current is on_cpu, which avoids it being
1263 * picked for load-balance and preemption/IRQs are still
1264 * disabled avoiding further scheduler activity on it and we're
1265 * being very careful to re-start the picking loop.
1266 */
1267 lockdep_unpin_lock(&rq->lock, cookie);
1268 pull_dl_task(rq);
1269 lockdep_repin_lock(&rq->lock, cookie);
1270 /*
1271 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1272 * means a stop task can slip in, in which case we need to
1273 * re-start task selection.
1274 */
1275 if (rq->stop && task_on_rq_queued(rq->stop))
1276 return RETRY_TASK;
1277 }
1278
1279 /*
1280 * When prev is DL, we may throttle it in put_prev_task().
1281 * So, we update time before we check for dl_nr_running.
1282 */
1283 if (prev->sched_class == &dl_sched_class)
1284 update_curr_dl(rq);
1285
1286 if (unlikely(!dl_rq->dl_nr_running))
1287 return NULL;
1288
1289 put_prev_task(rq, prev);
1290
1291 dl_se = pick_next_dl_entity(rq, dl_rq);
1292 BUG_ON(!dl_se);
1293
1294 p = dl_task_of(dl_se);
1295 p->se.exec_start = rq_clock_task(rq);
1296
1297 /* Running task will never be pushed. */
1298 dequeue_pushable_dl_task(rq, p);
1299
1300 if (hrtick_enabled(rq))
1301 start_hrtick_dl(rq, p);
1302
1303 queue_push_tasks(rq);
1304
1305 return p;
1306 }
1307
1308 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1309 {
1310 update_curr_dl(rq);
1311
1312 if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1)
1313 enqueue_pushable_dl_task(rq, p);
1314 }
1315
1316 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1317 {
1318 update_curr_dl(rq);
1319
1320 /*
1321 * Even when we have runtime, update_curr_dl() might have resulted in us
1322 * not being the leftmost task anymore. In that case NEED_RESCHED will
1323 * be set and schedule() will start a new hrtick for the next task.
1324 */
1325 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1326 is_leftmost(p, &rq->dl))
1327 start_hrtick_dl(rq, p);
1328 }
1329
1330 static void task_fork_dl(struct task_struct *p)
1331 {
1332 /*
1333 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1334 * sched_fork().
1335 */
1336 }
1337
1338 static void task_dead_dl(struct task_struct *p)
1339 {
1340 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1341
1342 /*
1343 * Since we are TASK_DEAD we won't slip out of the domain!
1344 */
1345 raw_spin_lock_irq(&dl_b->lock);
1346 /* XXX we should retain the bw until 0-lag */
1347 dl_b->total_bw -= p->dl.dl_bw;
1348 raw_spin_unlock_irq(&dl_b->lock);
1349 }
1350
1351 static void set_curr_task_dl(struct rq *rq)
1352 {
1353 struct task_struct *p = rq->curr;
1354
1355 p->se.exec_start = rq_clock_task(rq);
1356
1357 /* You can't push away the running task */
1358 dequeue_pushable_dl_task(rq, p);
1359 }
1360
1361 #ifdef CONFIG_SMP
1362
1363 /* Only try algorithms three times */
1364 #define DL_MAX_TRIES 3
1365
1366 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1367 {
1368 if (!task_running(rq, p) &&
1369 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1370 return 1;
1371 return 0;
1372 }
1373
1374 /*
1375 * Return the earliest pushable task on this rq that is suitable to be
1376 * executed on the given CPU, or NULL if there is none:
1377 */
1378 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1379 {
1380 struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
1381 struct task_struct *p = NULL;
1382
1383 if (!has_pushable_dl_tasks(rq))
1384 return NULL;
1385
1386 next_node:
1387 if (next_node) {
1388 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1389
1390 if (pick_dl_task(rq, p, cpu))
1391 return p;
1392
1393 next_node = rb_next(next_node);
1394 goto next_node;
1395 }
1396
1397 return NULL;
1398 }
1399
1400 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1401
1402 static int find_later_rq(struct task_struct *task)
1403 {
1404 struct sched_domain *sd;
1405 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1406 int this_cpu = smp_processor_id();
1407 int best_cpu, cpu = task_cpu(task);
1408
1409 /* Make sure the mask is initialized first */
1410 if (unlikely(!later_mask))
1411 return -1;
1412
1413 if (tsk_nr_cpus_allowed(task) == 1)
1414 return -1;
1415
1416 /*
1417 * We have to consider system topology and task affinity
1418 * first, then we can look for a suitable cpu.
1419 */
1420 best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1421 task, later_mask);
1422 if (best_cpu == -1)
1423 return -1;
1424
1425 /*
1426 * If we are here, some target has been found,
1427 * the most suitable of which is cached in best_cpu.
1428 * This is, among the runqueues where the current tasks
1429 * have later deadlines than the task's one, the rq
1430 * with the latest possible one.
1431 *
1432 * Now we check how well this matches with task's
1433 * affinity and system topology.
1434 *
1435 * The last cpu where the task ran is our first
1436 * guess, since it is most likely cache-hot there.
1437 */
1438 if (cpumask_test_cpu(cpu, later_mask))
1439 return cpu;
1440 /*
1441 * Check if this_cpu is to be skipped (i.e., it is
1442 * not in the mask) or not.
1443 */
1444 if (!cpumask_test_cpu(this_cpu, later_mask))
1445 this_cpu = -1;
1446
1447 rcu_read_lock();
1448 for_each_domain(cpu, sd) {
1449 if (sd->flags & SD_WAKE_AFFINE) {
1450
1451 /*
1452 * If possible, preempting this_cpu is
1453 * cheaper than migrating.
1454 */
1455 if (this_cpu != -1 &&
1456 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1457 rcu_read_unlock();
1458 return this_cpu;
1459 }
1460
1461 /*
1462 * Last chance: if best_cpu is valid and is
1463 * in the mask, that becomes our choice.
1464 */
1465 if (best_cpu < nr_cpu_ids &&
1466 cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1467 rcu_read_unlock();
1468 return best_cpu;
1469 }
1470 }
1471 }
1472 rcu_read_unlock();
1473
1474 /*
1475 * At this point, all our guesses failed, we just return
1476 * 'something', and let the caller sort the things out.
1477 */
1478 if (this_cpu != -1)
1479 return this_cpu;
1480
1481 cpu = cpumask_any(later_mask);
1482 if (cpu < nr_cpu_ids)
1483 return cpu;
1484
1485 return -1;
1486 }
1487
1488 /* Locks the rq it finds */
1489 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1490 {
1491 struct rq *later_rq = NULL;
1492 int tries;
1493 int cpu;
1494
1495 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1496 cpu = find_later_rq(task);
1497
1498 if ((cpu == -1) || (cpu == rq->cpu))
1499 break;
1500
1501 later_rq = cpu_rq(cpu);
1502
1503 if (later_rq->dl.dl_nr_running &&
1504 !dl_time_before(task->dl.deadline,
1505 later_rq->dl.earliest_dl.curr)) {
1506 /*
1507 * Target rq has tasks of equal or earlier deadline,
1508 * retrying does not release any lock and is unlikely
1509 * to yield a different result.
1510 */
1511 later_rq = NULL;
1512 break;
1513 }
1514
1515 /* Retry if something changed. */
1516 if (double_lock_balance(rq, later_rq)) {
1517 if (unlikely(task_rq(task) != rq ||
1518 !cpumask_test_cpu(later_rq->cpu,
1519 tsk_cpus_allowed(task)) ||
1520 task_running(rq, task) ||
1521 !dl_task(task) ||
1522 !task_on_rq_queued(task))) {
1523 double_unlock_balance(rq, later_rq);
1524 later_rq = NULL;
1525 break;
1526 }
1527 }
1528
1529 /*
1530 * If the rq we found has no -deadline task, or
1531 * its earliest one has a later deadline than our
1532 * task, the rq is a good one.
1533 */
1534 if (!later_rq->dl.dl_nr_running ||
1535 dl_time_before(task->dl.deadline,
1536 later_rq->dl.earliest_dl.curr))
1537 break;
1538
1539 /* Otherwise we try again. */
1540 double_unlock_balance(rq, later_rq);
1541 later_rq = NULL;
1542 }
1543
1544 return later_rq;
1545 }
1546
1547 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1548 {
1549 struct task_struct *p;
1550
1551 if (!has_pushable_dl_tasks(rq))
1552 return NULL;
1553
1554 p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1555 struct task_struct, pushable_dl_tasks);
1556
1557 BUG_ON(rq->cpu != task_cpu(p));
1558 BUG_ON(task_current(rq, p));
1559 BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
1560
1561 BUG_ON(!task_on_rq_queued(p));
1562 BUG_ON(!dl_task(p));
1563
1564 return p;
1565 }
1566
1567 /*
1568 * See if the non running -deadline tasks on this rq
1569 * can be sent to some other CPU where they can preempt
1570 * and start executing.
1571 */
1572 static int push_dl_task(struct rq *rq)
1573 {
1574 struct task_struct *next_task;
1575 struct rq *later_rq;
1576 int ret = 0;
1577
1578 if (!rq->dl.overloaded)
1579 return 0;
1580
1581 next_task = pick_next_pushable_dl_task(rq);
1582 if (!next_task)
1583 return 0;
1584
1585 retry:
1586 if (unlikely(next_task == rq->curr)) {
1587 WARN_ON(1);
1588 return 0;
1589 }
1590
1591 /*
1592 * If next_task preempts rq->curr, and rq->curr
1593 * can move away, it makes sense to just reschedule
1594 * without going further in pushing next_task.
1595 */
1596 if (dl_task(rq->curr) &&
1597 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1598 tsk_nr_cpus_allowed(rq->curr) > 1) {
1599 resched_curr(rq);
1600 return 0;
1601 }
1602
1603 /* We might release rq lock */
1604 get_task_struct(next_task);
1605
1606 /* Will lock the rq it'll find */
1607 later_rq = find_lock_later_rq(next_task, rq);
1608 if (!later_rq) {
1609 struct task_struct *task;
1610
1611 /*
1612 * We must check all this again, since
1613 * find_lock_later_rq releases rq->lock and it is
1614 * then possible that next_task has migrated.
1615 */
1616 task = pick_next_pushable_dl_task(rq);
1617 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1618 /*
1619 * The task is still there. We don't try
1620 * again, some other cpu will pull it when ready.
1621 */
1622 goto out;
1623 }
1624
1625 if (!task)
1626 /* No more tasks */
1627 goto out;
1628
1629 put_task_struct(next_task);
1630 next_task = task;
1631 goto retry;
1632 }
1633
1634 deactivate_task(rq, next_task, 0);
1635 next_task->on_rq = TASK_ON_RQ_MIGRATING;
1636 set_task_cpu(next_task, later_rq->cpu);
1637 next_task->on_rq = TASK_ON_RQ_QUEUED;
1638 activate_task(later_rq, next_task, 0);
1639 ret = 1;
1640
1641 resched_curr(later_rq);
1642
1643 double_unlock_balance(rq, later_rq);
1644
1645 out:
1646 put_task_struct(next_task);
1647
1648 return ret;
1649 }
1650
1651 static void push_dl_tasks(struct rq *rq)
1652 {
1653 /* push_dl_task() will return true if it moved a -deadline task */
1654 while (push_dl_task(rq))
1655 ;
1656 }
1657
1658 static void pull_dl_task(struct rq *this_rq)
1659 {
1660 int this_cpu = this_rq->cpu, cpu;
1661 struct task_struct *p;
1662 bool resched = false;
1663 struct rq *src_rq;
1664 u64 dmin = LONG_MAX;
1665
1666 if (likely(!dl_overloaded(this_rq)))
1667 return;
1668
1669 /*
1670 * Match the barrier from dl_set_overload(); this guarantees that if we
1671 * see overloaded we must also see the dlo_mask bit.
1672 */
1673 smp_rmb();
1674
1675 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1676 if (this_cpu == cpu)
1677 continue;
1678
1679 src_rq = cpu_rq(cpu);
1680
1681 /*
1682 * It looks racy, and it is! However, as in sched_rt.c,
1683 * we are fine with this.
1684 */
1685 if (this_rq->dl.dl_nr_running &&
1686 dl_time_before(this_rq->dl.earliest_dl.curr,
1687 src_rq->dl.earliest_dl.next))
1688 continue;
1689
1690 /* Might drop this_rq->lock */
1691 double_lock_balance(this_rq, src_rq);
1692
1693 /*
1694 * If there are no more pullable tasks on the
1695 * rq, we're done with it.
1696 */
1697 if (src_rq->dl.dl_nr_running <= 1)
1698 goto skip;
1699
1700 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
1701
1702 /*
1703 * We found a task to be pulled if:
1704 * - it preempts our current (if there's one), and
1705 * - it will preempt the last one we pulled (if any).
1706 */
1707 if (p && dl_time_before(p->dl.deadline, dmin) &&
1708 (!this_rq->dl.dl_nr_running ||
1709 dl_time_before(p->dl.deadline,
1710 this_rq->dl.earliest_dl.curr))) {
1711 WARN_ON(p == src_rq->curr);
1712 WARN_ON(!task_on_rq_queued(p));
1713
1714 /*
1715 * Then we pull iff p has actually an earlier
1716 * deadline than the current task of its runqueue.
1717 */
1718 if (dl_time_before(p->dl.deadline,
1719 src_rq->curr->dl.deadline))
1720 goto skip;
1721
1722 resched = true;
1723
1724 deactivate_task(src_rq, p, 0);
1725 p->on_rq = TASK_ON_RQ_MIGRATING;
1726 set_task_cpu(p, this_cpu);
1727 p->on_rq = TASK_ON_RQ_QUEUED;
1728 activate_task(this_rq, p, 0);
1729 dmin = p->dl.deadline;
1730
1731 /* Is there any other task even earlier? */
1732 }
1733 skip:
1734 double_unlock_balance(this_rq, src_rq);
1735 }
1736
1737 if (resched)
1738 resched_curr(this_rq);
1739 }
1740
1741 /*
1742 * Since the task is not running and a reschedule is not going to happen
1743 * anytime soon on its runqueue, we try pushing it away now.
1744 */
1745 static void task_woken_dl(struct rq *rq, struct task_struct *p)
1746 {
1747 if (!task_running(rq, p) &&
1748 !test_tsk_need_resched(rq->curr) &&
1749 tsk_nr_cpus_allowed(p) > 1 &&
1750 dl_task(rq->curr) &&
1751 (tsk_nr_cpus_allowed(rq->curr) < 2 ||
1752 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1753 push_dl_tasks(rq);
1754 }
1755 }
1756
1757 static void set_cpus_allowed_dl(struct task_struct *p,
1758 const struct cpumask *new_mask)
1759 {
1760 struct root_domain *src_rd;
1761 struct rq *rq;
1762
1763 BUG_ON(!dl_task(p));
1764
1765 rq = task_rq(p);
1766 src_rd = rq->rd;
1767 /*
1768 * Migrating a SCHED_DEADLINE task between exclusive
1769 * cpusets (different root_domains) entails a bandwidth
1770 * update. We already made space for us in the destination
1771 * domain (see cpuset_can_attach()).
1772 */
1773 if (!cpumask_intersects(src_rd->span, new_mask)) {
1774 struct dl_bw *src_dl_b;
1775
1776 src_dl_b = dl_bw_of(cpu_of(rq));
1777 /*
1778 * We now free resources of the root_domain we are migrating
1779 * off. In the worst case, sched_setattr() may temporarily fail
1780 * until we complete the update.
1781 */
1782 raw_spin_lock(&src_dl_b->lock);
1783 __dl_clear(src_dl_b, p->dl.dl_bw);
1784 raw_spin_unlock(&src_dl_b->lock);
1785 }
1786
1787 set_cpus_allowed_common(p, new_mask);
1788 }
1789
1790 /* Assumes rq->lock is held */
1791 static void rq_online_dl(struct rq *rq)
1792 {
1793 if (rq->dl.overloaded)
1794 dl_set_overload(rq);
1795
1796 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
1797 if (rq->dl.dl_nr_running > 0)
1798 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
1799 }
1800
1801 /* Assumes rq->lock is held */
1802 static void rq_offline_dl(struct rq *rq)
1803 {
1804 if (rq->dl.overloaded)
1805 dl_clear_overload(rq);
1806
1807 cpudl_clear(&rq->rd->cpudl, rq->cpu);
1808 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1809 }
1810
1811 void __init init_sched_dl_class(void)
1812 {
1813 unsigned int i;
1814
1815 for_each_possible_cpu(i)
1816 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1817 GFP_KERNEL, cpu_to_node(i));
1818 }
1819
1820 #endif /* CONFIG_SMP */
1821
1822 static void switched_from_dl(struct rq *rq, struct task_struct *p)
1823 {
1824 /*
1825 * Start the deadline timer; if we switch back to dl before this we'll
1826 * continue consuming our current CBS slice. If we stay outside of
1827 * SCHED_DEADLINE until the deadline passes, the timer will reset the
1828 * task.
1829 */
1830 if (!start_dl_timer(p))
1831 __dl_clear_params(p);
1832
1833 /*
1834 * Since this might be the only -deadline task on the rq,
1835 * this is the right place to try to pull some other one
1836 * from an overloaded cpu, if any.
1837 */
1838 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
1839 return;
1840
1841 queue_pull_task(rq);
1842 }
1843
1844 /*
1845 * When switching to -deadline, we may overload the rq, then
1846 * we try to push someone off, if possible.
1847 */
1848 static void switched_to_dl(struct rq *rq, struct task_struct *p)
1849 {
1850
1851 /* If p is not queued we will update its parameters at next wakeup. */
1852 if (!task_on_rq_queued(p))
1853 return;
1854
1855 /*
1856 * If p is boosted we already updated its params in
1857 * rt_mutex_setprio()->enqueue_task(..., ENQUEUE_REPLENISH),
1858 * p's deadline being now already after rq_clock(rq).
1859 */
1860 if (dl_time_before(p->dl.deadline, rq_clock(rq)))
1861 setup_new_dl_entity(&p->dl);
1862
1863 if (rq->curr != p) {
1864 #ifdef CONFIG_SMP
1865 if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
1866 queue_push_tasks(rq);
1867 #endif
1868 if (dl_task(rq->curr))
1869 check_preempt_curr_dl(rq, p, 0);
1870 else
1871 resched_curr(rq);
1872 }
1873 }
1874
1875 /*
1876 * If the scheduling parameters of a -deadline task changed,
1877 * a push or pull operation might be needed.
1878 */
1879 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1880 int oldprio)
1881 {
1882 if (task_on_rq_queued(p) || rq->curr == p) {
1883 #ifdef CONFIG_SMP
1884 /*
1885 * This might be too much, but unfortunately
1886 * we don't have the old deadline value, and
1887 * we can't tell whether the task is increasing
1888 * or lowering its prio, so...
1889 */
1890 if (!rq->dl.overloaded)
1891 queue_pull_task(rq);
1892
1893 /*
1894 * If we now have an earlier deadline task than p,
1895 * then reschedule, provided p is still on this
1896 * runqueue.
1897 */
1898 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
1899 resched_curr(rq);
1900 #else
1901 /*
1902 * Again, we don't know if p has an earlier
1903 * or later deadline, so let's blindly set a
1904 * (maybe not needed) rescheduling point.
1905 */
1906 resched_curr(rq);
1907 #endif /* CONFIG_SMP */
1908 }
1909 }
1910
1911 const struct sched_class dl_sched_class = {
1912 .next = &rt_sched_class,
1913 .enqueue_task = enqueue_task_dl,
1914 .dequeue_task = dequeue_task_dl,
1915 .yield_task = yield_task_dl,
1916
1917 .check_preempt_curr = check_preempt_curr_dl,
1918
1919 .pick_next_task = pick_next_task_dl,
1920 .put_prev_task = put_prev_task_dl,
1921
1922 #ifdef CONFIG_SMP
1923 .select_task_rq = select_task_rq_dl,
1924 .set_cpus_allowed = set_cpus_allowed_dl,
1925 .rq_online = rq_online_dl,
1926 .rq_offline = rq_offline_dl,
1927 .task_woken = task_woken_dl,
1928 #endif
1929
1930 .set_curr_task = set_curr_task_dl,
1931 .task_tick = task_tick_dl,
1932 .task_fork = task_fork_dl,
1933 .task_dead = task_dead_dl,
1934
1935 .prio_changed = prio_changed_dl,
1936 .switched_from = switched_from_dl,
1937 .switched_to = switched_to_dl,
1938
1939 .update_curr = update_curr_dl,
1940 };
1941
1942 #ifdef CONFIG_SCHED_DEBUG
1943 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
1944
1945 void print_dl_stats(struct seq_file *m, int cpu)
1946 {
1947 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
1948 }
1949 #endif /* CONFIG_SCHED_DEBUG */
1950