1 /*
2 * Deadline Scheduling Class (SCHED_DEADLINE)
3 *
4 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
5 *
6 * Tasks that periodically execute their instances for less than their
7 * runtime won't miss any of their deadlines.
8 * Tasks that are not periodic or sporadic or that try to execute more
9 * than their reserved bandwidth will be slowed down (and may potentially
10 * miss some of their deadlines), and won't affect any other task.
11 *
12 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
13 * Juri Lelli <juri.lelli@gmail.com>,
14 * Michael Trimarchi <michael@amarulasolutions.com>,
15 * Fabio Checconi <fchecconi@gmail.com>
16 */
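/*
 * Illustrative userspace sketch (not part of the scheduler itself): a task
 * would typically request a SCHED_DEADLINE reservation via the
 * sched_setattr() syscall, e.g. 10ms of runtime every 100ms period
 * (all values in nanoseconds):
 *
 *	struct sched_attr attr = {
 *		.size           = sizeof(attr),
 *		.sched_policy   = SCHED_DEADLINE,
 *		.sched_runtime  = 10 * 1000 * 1000,
 *		.sched_deadline = 100 * 1000 * 1000,
 *		.sched_period   = 100 * 1000 * 1000,
 *	};
 *	syscall(__NR_sched_setattr, 0, &attr, 0);
 *
 * which reserves 10% of a CPU's bandwidth for the task; see
 * Documentation/scheduler/sched-deadline.txt for the full interface.
 */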
17 #include "sched.h"
18
19 #include <linux/slab.h>
20
21 struct dl_bandwidth def_dl_bandwidth;
22
23 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
24 {
25 return container_of(dl_se, struct task_struct, dl);
26 }
27
28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
29 {
30 return container_of(dl_rq, struct rq, dl);
31 }
32
33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
34 {
35 struct task_struct *p = dl_task_of(dl_se);
36 struct rq *rq = task_rq(p);
37
38 return &rq->dl;
39 }
40
41 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
42 {
43 return !RB_EMPTY_NODE(&dl_se->rb_node);
44 }
45
46 static void add_average_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
47 {
48 u64 se_bw = dl_se->dl_bw;
49
50 dl_rq->avg_bw += se_bw;
51 }
52
53 static void clear_average_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
54 {
55 u64 se_bw = dl_se->dl_bw;
56
57 dl_rq->avg_bw -= se_bw;
58 if (dl_rq->avg_bw < 0) {
59 WARN_ON(1);
60 dl_rq->avg_bw = 0;
61 }
62 }
63
64 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
65 {
66 struct sched_dl_entity *dl_se = &p->dl;
67
68 return dl_rq->rb_leftmost == &dl_se->rb_node;
69 }
70
71 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
72 {
73 raw_spin_lock_init(&dl_b->dl_runtime_lock);
74 dl_b->dl_period = period;
75 dl_b->dl_runtime = runtime;
76 }
77
78 void init_dl_bw(struct dl_bw *dl_b)
79 {
80 raw_spin_lock_init(&dl_b->lock);
81 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
82 if (global_rt_runtime() == RUNTIME_INF)
83 dl_b->bw = -1;
84 else
85 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
86 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
87 dl_b->total_bw = 0;
88 }
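/*
 * Worked example (assuming to_ratio() encodes runtime/period as a 20-bit
 * fixed-point fraction, as defined in kernel/sched/core.c): with the default
 * global RT bandwidth of runtime = 950000us over period = 1000000us, the
 * admission limit computed above is roughly 0.95 << 20 ~= 996147, i.e. 95%
 * of each CPU is available to -deadline tasks.
 */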
89
90 void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
91 {
92 dl_rq->rb_root = RB_ROOT;
93
94 #ifdef CONFIG_SMP
95 /* zero means no -deadline tasks */
96 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
97
98 dl_rq->dl_nr_migratory = 0;
99 dl_rq->overloaded = 0;
100 dl_rq->pushable_dl_tasks_root = RB_ROOT;
101 #else
102 init_dl_bw(&dl_rq->dl_bw);
103 #endif
104 }
105
106 #ifdef CONFIG_SMP
107
108 static inline int dl_overloaded(struct rq *rq)
109 {
110 return atomic_read(&rq->rd->dlo_count);
111 }
112
113 static inline void dl_set_overload(struct rq *rq)
114 {
115 if (!rq->online)
116 return;
117
118 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
119 /*
120 * Must be visible before the overload count is
121 * set (as in sched_rt.c).
122 *
123 * Matched by the barrier in pull_dl_task().
124 */
125 smp_wmb();
126 atomic_inc(&rq->rd->dlo_count);
127 }
128
129 static inline void dl_clear_overload(struct rq *rq)
130 {
131 if (!rq->online)
132 return;
133
134 atomic_dec(&rq->rd->dlo_count);
135 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
136 }
137
138 static void update_dl_migration(struct dl_rq *dl_rq)
139 {
140 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
141 if (!dl_rq->overloaded) {
142 dl_set_overload(rq_of_dl_rq(dl_rq));
143 dl_rq->overloaded = 1;
144 }
145 } else if (dl_rq->overloaded) {
146 dl_clear_overload(rq_of_dl_rq(dl_rq));
147 dl_rq->overloaded = 0;
148 }
149 }
150
151 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
152 {
153 struct task_struct *p = dl_task_of(dl_se);
154
155 if (p->nr_cpus_allowed > 1)
156 dl_rq->dl_nr_migratory++;
157
158 update_dl_migration(dl_rq);
159 }
160
161 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
162 {
163 struct task_struct *p = dl_task_of(dl_se);
164
165 if (p->nr_cpus_allowed > 1)
166 dl_rq->dl_nr_migratory--;
167
168 update_dl_migration(dl_rq);
169 }
170
171 /*
172 * The list of pushable -deadline tasks is not a plist, as in
173 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
174 */
175 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
176 {
177 struct dl_rq *dl_rq = &rq->dl;
178 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
179 struct rb_node *parent = NULL;
180 struct task_struct *entry;
181 int leftmost = 1;
182
183 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
184
185 while (*link) {
186 parent = *link;
187 entry = rb_entry(parent, struct task_struct,
188 pushable_dl_tasks);
189 if (dl_entity_preempt(&p->dl, &entry->dl))
190 link = &parent->rb_left;
191 else {
192 link = &parent->rb_right;
193 leftmost = 0;
194 }
195 }
196
197 if (leftmost)
198 dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
199
200 rb_link_node(&p->pushable_dl_tasks, parent, link);
201 rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
202 }
203
204 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
205 {
206 struct dl_rq *dl_rq = &rq->dl;
207
208 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
209 return;
210
211 if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
212 struct rb_node *next_node;
213
214 next_node = rb_next(&p->pushable_dl_tasks);
215 dl_rq->pushable_dl_tasks_leftmost = next_node;
216 }
217
218 rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
219 RB_CLEAR_NODE(&p->pushable_dl_tasks);
220 }
221
222 static inline int has_pushable_dl_tasks(struct rq *rq)
223 {
224 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
225 }
226
227 static int push_dl_task(struct rq *rq);
228
229 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
230 {
231 return dl_task(prev);
232 }
233
234 static inline void set_post_schedule(struct rq *rq)
235 {
236 rq->post_schedule = has_pushable_dl_tasks(rq);
237 }
238
239 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
240
241 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
242 {
243 struct rq *later_rq = NULL;
244 bool fallback = false;
245
246 later_rq = find_lock_later_rq(p, rq);
247
248 if (!later_rq) {
249 int cpu;
250
251 /*
252 * If we cannot preempt any rq, fall back to pick any
253 * online cpu.
254 */
255 fallback = true;
256 cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
257 if (cpu >= nr_cpu_ids) {
258 /*
259 * Failed to find any suitable cpu.
260 * The task will never come back!
261 */
262 BUG_ON(dl_bandwidth_enabled());
263
264 /*
265 * If admission control is disabled we
266 * try a little harder to let the task
267 * run.
268 */
269 cpu = cpumask_any(cpu_active_mask);
270 }
271 later_rq = cpu_rq(cpu);
272 double_lock_balance(rq, later_rq);
273 }
274
275 /*
276 * By now the task is replenished and enqueued; migrate it.
277 */
278 deactivate_task(rq, p, 0);
279 set_task_cpu(p, later_rq->cpu);
280 activate_task(later_rq, p, 0);
281
282 if (!fallback)
283 resched_curr(later_rq);
284
285 double_unlock_balance(later_rq, rq);
286
287 return later_rq;
288 }
289
290 #else
291
292 static inline
293 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
294 {
295 }
296
297 static inline
298 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
299 {
300 }
301
302 static inline
303 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
304 {
305 }
306
307 static inline
308 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
309 {
310 }
311
312 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
313 {
314 return false;
315 }
316
317 static inline int pull_dl_task(struct rq *rq)
318 {
319 return 0;
320 }
321
322 static inline void set_post_schedule(struct rq *rq)
323 {
324 }
325 #endif /* CONFIG_SMP */
326
327 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
328 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
329 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
330 int flags);
331
332 /*
333 * We are being explicitly informed that a new instance is starting,
334 * and this means that:
335 * - the absolute deadline of the entity has to be placed at
336 * current time + relative deadline;
337 * - the runtime of the entity has to be set to the maximum value.
338 *
339 * The capability of specifying such an event is useful whenever a -deadline
340 * entity wants to (try to!) synchronize its behaviour with the scheduler's
341 * one, and to (try to!) reconcile itself with its own scheduling
342 * parameters.
343 */
344 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
345 struct sched_dl_entity *pi_se)
346 {
347 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
348 struct rq *rq = rq_of_dl_rq(dl_rq);
349
350 WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
351
352 /*
353 * We use the regular wall clock time to set deadlines in the
354 * future; in fact, we must consider execution overheads (time
355 * spent on hardirq context, etc.).
356 */
357 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
358 dl_se->runtime = pi_se->dl_runtime;
359 dl_se->dl_new = 0;
360 }
361
362 /*
363 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
364 * possibility of an entity lasting more than what it declared, and thus
365 * exhausting its runtime.
366 *
367 * Here we are interested in making runtime overrun possible, but we do
368 * not want an entity which is misbehaving to affect the scheduling of all
369 * other entities.
370 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
371 * is used, in order to confine each entity within its own bandwidth.
372 *
373 * This function deals exactly with that, and ensures that when the runtime
374 * of an entity is replenished, its deadline is also postponed. That ensures
375 * the overrunning entity can't interfere with other entities in the system and
376 * can't make them miss their deadlines. Reasons why this kind of overrun
377 * could happen are, typically, an entity voluntarily trying to exceed its
378 * runtime, or it just underestimated it during sched_setattr().
379 */
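/*
 * Worked example (illustrative numbers): with dl_runtime = 5ms and
 * dl_period = 20ms, an entity that ends up 7ms over budget (runtime = -7ms)
 * gets two replenishments in the loop below: runtime goes to -2ms and then
 * to +3ms, while the absolute deadline is postponed by two full periods
 * (40ms), so the overrun is paid back out of the entity's own bandwidth.
 */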
380 static void replenish_dl_entity(struct sched_dl_entity *dl_se,
381 struct sched_dl_entity *pi_se)
382 {
383 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
384 struct rq *rq = rq_of_dl_rq(dl_rq);
385
386 BUG_ON(pi_se->dl_runtime <= 0);
387
388 /*
389 * This could be the case for a !-dl task that is boosted.
390 * Just go with full inherited parameters.
391 */
392 if (dl_se->dl_deadline == 0) {
393 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
394 dl_se->runtime = pi_se->dl_runtime;
395 }
396
397 /*
398 * We keep moving the deadline away until we get some
399 * available runtime for the entity. This ensures correct
400 * handling of situations where the runtime overrun is
401 * arbitrarily large.
402 */
403 while (dl_se->runtime <= 0) {
404 dl_se->deadline += pi_se->dl_period;
405 dl_se->runtime += pi_se->dl_runtime;
406 }
407
408 /*
409 * At this point, the deadline really should be "in
410 * the future" with respect to rq->clock. If it's
411 * not, we are, for some reason, lagging too much!
412 * Anyway, after having warned userspace about that,
413 * we still try to keep things running by
414 * resetting the deadline and the budget of the
415 * entity.
416 */
417 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
418 printk_deferred_once("sched: DL replenish lagged too much\n");
419 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
420 dl_se->runtime = pi_se->dl_runtime;
421 }
422
423 if (dl_se->dl_yielded)
424 dl_se->dl_yielded = 0;
425 if (dl_se->dl_throttled)
426 dl_se->dl_throttled = 0;
427 }
428
429 /*
430 * Here we check if --at time t-- an entity (which is probably being
431 * [re]activated or, in general, enqueued) can use its remaining runtime
432 * and its current deadline _without_ exceeding the bandwidth it is
433 * assigned (function returns true if it can't). We are in fact applying
434 * one of the CBS rules: when a task wakes up, if the residual runtime
435 * over residual deadline fits within the allocated bandwidth, then we
436 * can keep the current (absolute) deadline and residual budget without
437 * disrupting the schedulability of the system. Otherwise, we should
438 * refill the runtime and set the deadline a period in the future,
439 * because keeping the current (absolute) deadline of the task would
440 * result in breaking guarantees promised to other tasks (refer to
441 * Documentation/scheduler/sched-deadline.txt for more information).
442 *
443 * This function returns true if:
444 *
445 * runtime / (deadline - t) > dl_runtime / dl_deadline ,
446 *
447 * IOW we can't recycle current parameters.
448 *
449 * Notice that the bandwidth check is done against the deadline. For a
450 * task with deadline equal to period this is the same as using
451 * dl_period instead of dl_deadline in the equation above.
452 */
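/*
 * Worked example (illustrative numbers): take an entity with
 * dl_runtime = 5ms and dl_deadline = 20ms (25% bandwidth) that wakes up
 * with 4ms of residual runtime and 10ms left to its current absolute
 * deadline. Its residual bandwidth would be 4/10 = 40% > 25%, so this
 * function returns true and the caller refills the runtime and pushes the
 * deadline forward instead of reusing the old parameters.
 */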
453 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
454 struct sched_dl_entity *pi_se, u64 t)
455 {
456 u64 left, right;
457
458 /*
459 * left and right are the two sides of the equation above,
460 * after a bit of shuffling to use multiplications instead
461 * of divisions.
462 *
463 * Note that none of the time values involved in the two
464 * multiplications are absolute: dl_deadline and dl_runtime
465 * are the relative deadline and the maximum runtime of each
466 * instance, runtime is the runtime left for the last instance
467 * and (deadline - t), since t is rq->clock, is the time left
468 * to the (absolute) deadline. Even if overflowing the u64 type
469 * is very unlikely to occur in both cases, here we scale down
470 * as we want to avoid that risk at all. Scaling down by 10
471 * means that we reduce granularity to 1us. We are fine with it,
472 * since this is only a true/false check and, anyway, thinking
473 * of anything below microseconds resolution is actually fiction
474 * (but still we want to give the user that illusion >;).
475 */
476 left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
477 right = ((dl_se->deadline - t) >> DL_SCALE) *
478 (pi_se->dl_runtime >> DL_SCALE);
479
480 return dl_time_before(right, left);
481 }
482
483 /*
484 * When a -deadline entity is queued back on the runqueue, its runtime and
485 * deadline might need updating.
486 *
487 * The policy here is that we update the deadline of the entity only if:
488 * - the current deadline is in the past,
489 * - using the remaining runtime with the current deadline would make
490 * the entity exceed its bandwidth.
491 */
492 static void update_dl_entity(struct sched_dl_entity *dl_se,
493 struct sched_dl_entity *pi_se)
494 {
495 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
496 struct rq *rq = rq_of_dl_rq(dl_rq);
497
498 if (dl_se->dl_new)
499 add_average_bw(dl_se, dl_rq);
500
501 /*
502 * The arrival of a new instance needs special treatment, i.e.,
503 * the actual scheduling parameters have to be "renewed".
504 */
505 if (dl_se->dl_new) {
506 setup_new_dl_entity(dl_se, pi_se);
507 return;
508 }
509
510 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
511 dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
512 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
513 dl_se->runtime = pi_se->dl_runtime;
514 }
515 }
516
517 /*
518 * If the entity depleted all its runtime, and if we want it to sleep
519 * while waiting for some new execution time to become available, we
520 * set the bandwidth enforcement timer to the replenishment instant
521 * and try to activate it.
522 *
523 * Notice that it is important for the caller to know if the timer
524 * actually started or not (i.e., the replenishment instant is in
525 * the future or in the past).
526 */
527 static int start_dl_timer(struct task_struct *p)
528 {
529 struct sched_dl_entity *dl_se = &p->dl;
530 struct hrtimer *timer = &dl_se->dl_timer;
531 struct rq *rq = task_rq(p);
532 ktime_t now, act;
533 s64 delta;
534
535 lockdep_assert_held(&rq->lock);
536
537 /*
538 * We want the timer to fire at the deadline, but considering
539 * that the deadline is expressed in rq->clock time and not in
540 * the hrtimer's time base.
541 */
542 act = ns_to_ktime(dl_se->deadline);
543 now = hrtimer_cb_get_time(timer);
544 delta = ktime_to_ns(now) - rq_clock(rq);
545 act = ktime_add_ns(act, delta);
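/*
 * Worked example (illustrative numbers): if rq_clock(rq) reads 1000us,
 * the deadline is 1100us and the hrtimer base currently reads 1030us,
 * then delta = 30us and act = 1130us, i.e. the timer still fires 100us
 * from "now" even though the two clocks disagree.
 */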
546
547 /*
548 * If the expiry time already passed, e.g., because the value
549 * chosen as the deadline is too small, don't even try to
550 * start the timer in the past!
551 */
552 if (ktime_us_delta(act, now) < 0)
553 return 0;
554
555 /*
556 * !enqueued will guarantee another callback; even if one is already in
557 * progress. This ensures a balanced {get,put}_task_struct().
558 *
559 * The race against __run_timer() clearing the enqueued state is
560 * harmless because we're holding task_rq()->lock, therefore the timer
561 * expiring after we've done the check will wait on its task_rq_lock()
562 * and observe our state.
563 */
564 if (!hrtimer_is_queued(timer)) {
565 get_task_struct(p);
566 hrtimer_start(timer, act, HRTIMER_MODE_ABS);
567 }
568
569 return 1;
570 }
571
572 /*
573 * This is the bandwidth enforcement timer callback. If here, we know
574 * a task is not on its dl_rq, since the fact that the timer was running
575 * means the task is throttled and needs a runtime replenishment.
576 *
577 * However, what we actually do depends on whether the task is active
578 * (it is on its rq) or has been removed from there by a call to
579 * dequeue_task_dl(). In the former case we must issue the runtime
580 * replenishment and add the task back to the dl_rq; in the latter, we just
581 * do nothing but clearing dl_throttled, so that runtime and deadline
582 * updating (and the queueing back to dl_rq) will be done by the
583 * next call to enqueue_task_dl().
584 */
585 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
586 {
587 struct sched_dl_entity *dl_se = container_of(timer,
588 struct sched_dl_entity,
589 dl_timer);
590 struct task_struct *p = dl_task_of(dl_se);
591 struct rq *rq;
592 again:
593 rq = task_rq(p);
594 raw_spin_lock(&rq->lock);
595
596 if (rq != task_rq(p)) {
597 /* Task was moved, retrying. */
598 raw_spin_unlock(&rq->lock);
599 goto again;
600 }
601
602 /*
603 * The task might have changed its scheduling policy to something
604 * different than SCHED_DEADLINE (through switched_from_dl()).
605 */
606 if (!dl_task(p)) {
607 __dl_clear_params(p);
608 goto unlock;
609 }
610
611 /*
612 * This is possible if switched_from_dl() raced against a running
613 * callback that took the above !dl_task() path and we've since then
614 * switched back into SCHED_DEADLINE.
615 *
616 * There's nothing to do except drop our task reference.
617 */
618 if (dl_se->dl_new)
619 goto unlock;
620
621 /*
622 * The task might have been boosted by someone else and might be in the
623 * boosting/deboosting path, it's not throttled.
624 */
625 if (dl_se->dl_boosted)
626 goto unlock;
627
628 /*
629 * Spurious timer due to start_dl_timer() race; or we already received
630 * a replenishment from rt_mutex_setprio().
631 */
632 if (!dl_se->dl_throttled)
633 goto unlock;
634
635 sched_clock_tick();
636 update_rq_clock(rq);
637
638 /*
639 * If the throttle happened during sched-out; like:
640 *
641 * schedule()
642 * deactivate_task()
643 * dequeue_task_dl()
644 * update_curr_dl()
645 * start_dl_timer()
646 * __dequeue_task_dl()
647 * prev->on_rq = 0;
648 *
649 * We can be both throttled and !queued. Replenish the counter
650 * but do not enqueue -- wait for our wakeup to do that.
651 */
652 if (!task_on_rq_queued(p)) {
653 replenish_dl_entity(dl_se, dl_se);
654 goto unlock;
655 }
656
657 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
658 if (dl_task(rq->curr))
659 check_preempt_curr_dl(rq, p, 0);
660 else
661 resched_curr(rq);
662
663 #ifdef CONFIG_SMP
664 /*
665 * Perform balancing operations here; after the replenishments. We
666 * cannot drop rq->lock before this, otherwise the assertion in
667 * start_dl_timer() about not missing updates is not true.
668 *
669 * If we find that the rq the task was on is no longer available, we
670 * need to select a new rq.
671 *
672 * XXX figure out if select_task_rq_dl() deals with offline cpus.
673 */
674 if (unlikely(!rq->online))
675 rq = dl_task_offline_migration(rq, p);
676
677 /*
678 * Queueing this task back might have overloaded rq, check if we need
679 * to kick someone away.
680 */
681 if (has_pushable_dl_tasks(rq))
682 push_dl_task(rq);
683 #endif
684
685 unlock:
686 raw_spin_unlock(&rq->lock);
687
688 /*
689 * This can free the task_struct, including this hrtimer, do not touch
690 * anything related to that after this.
691 */
692 put_task_struct(p);
693
694 return HRTIMER_NORESTART;
695 }
696
697 void init_dl_task_timer(struct sched_dl_entity *dl_se)
698 {
699 struct hrtimer *timer = &dl_se->dl_timer;
700
701 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
702 timer->function = dl_task_timer;
703 }
704
705 static
706 int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
707 {
708 return (dl_se->runtime <= 0);
709 }
710
711 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
712
713 /*
714 * Update the current task's runtime statistics (provided it is still
715 * a -deadline task and has not been removed from the dl_rq).
716 */
717 static void update_curr_dl(struct rq *rq)
718 {
719 struct task_struct *curr = rq->curr;
720 struct sched_dl_entity *dl_se = &curr->dl;
721 u64 delta_exec;
722
723 if (!dl_task(curr) || !on_dl_rq(dl_se))
724 return;
725
726 /*
727 * Consumed budget is computed considering the time as
728 * observed by schedulable tasks (excluding time spent
729 * in hardirq context, etc.). Deadlines are instead
730 * computed using hard walltime. This seems to be the more
731 * natural solution, but the full ramifications of this
732 * approach need further study.
733 */
734 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
735 if (unlikely((s64)delta_exec <= 0))
736 return;
737
738 schedstat_set(curr->se.statistics.exec_max,
739 max(curr->se.statistics.exec_max, delta_exec));
740
741 curr->se.sum_exec_runtime += delta_exec;
742 account_group_exec_runtime(curr, delta_exec);
743
744 curr->se.exec_start = rq_clock_task(rq);
745 cpuacct_charge(curr, delta_exec);
746
747 dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
748 if (dl_runtime_exceeded(rq, dl_se)) {
749 dl_se->dl_throttled = 1;
750 __dequeue_task_dl(rq, curr, 0);
751 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
752 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
753
754 if (!is_leftmost(curr, &rq->dl))
755 resched_curr(rq);
756 }
757
758 /*
759 * Because -- for now -- we share the rt bandwidth, we need to
760 * account our runtime there too, otherwise actual rt tasks
761 * would be able to exceed the shared quota.
762 *
763 * Account to the root rt group for now.
764 *
765 * The solution we're working towards is having the RT groups scheduled
766 * using deadline servers -- however there's a few nasties to figure
767 * out before that can happen.
768 */
769 if (rt_bandwidth_enabled()) {
770 struct rt_rq *rt_rq = &rq->rt;
771
772 raw_spin_lock(&rt_rq->rt_runtime_lock);
773 /*
774 * We'll let actual RT tasks worry about the overflow here, we
775 * have our own CBS to keep us inline; only account when RT
776 * bandwidth is relevant.
777 */
778 if (sched_rt_bandwidth_account(rt_rq))
779 rt_rq->rt_time += delta_exec;
780 raw_spin_unlock(&rt_rq->rt_runtime_lock);
781 }
782 }
783
784 #ifdef CONFIG_SMP
785
786 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
787
788 static inline u64 next_deadline(struct rq *rq)
789 {
790 struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
791
792 if (next && dl_prio(next->prio))
793 return next->dl.deadline;
794 else
795 return 0;
796 }
797
798 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
799 {
800 struct rq *rq = rq_of_dl_rq(dl_rq);
801
802 if (dl_rq->earliest_dl.curr == 0 ||
803 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
804 /*
805 * If the dl_rq had no -deadline tasks, or if the new task
806 * has a shorter deadline than the current one on dl_rq, we
807 * know that the previous earliest becomes our next earliest,
808 * as the new task becomes the earliest itself.
809 */
810 dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
811 dl_rq->earliest_dl.curr = deadline;
812 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
813 } else if (dl_rq->earliest_dl.next == 0 ||
814 dl_time_before(deadline, dl_rq->earliest_dl.next)) {
815 /*
816 * On the other hand, if the new -deadline task has
817 * a later deadline than the earliest one on dl_rq, but
818 * it is earlier than the next (if any), we must
819 * recompute the next-earliest.
820 */
821 dl_rq->earliest_dl.next = next_deadline(rq);
822 }
823 }
824
825 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
826 {
827 struct rq *rq = rq_of_dl_rq(dl_rq);
828
829 /*
830 * Since we may have removed our earliest (and/or next earliest)
831 * task we must recompute them.
832 */
833 if (!dl_rq->dl_nr_running) {
834 dl_rq->earliest_dl.curr = 0;
835 dl_rq->earliest_dl.next = 0;
836 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
837 } else {
838 struct rb_node *leftmost = dl_rq->rb_leftmost;
839 struct sched_dl_entity *entry;
840
841 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
842 dl_rq->earliest_dl.curr = entry->deadline;
843 dl_rq->earliest_dl.next = next_deadline(rq);
844 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
845 }
846 }
847
848 #else
849
850 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
851 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
852
853 #endif /* CONFIG_SMP */
854
855 static inline
856 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
857 {
858 int prio = dl_task_of(dl_se)->prio;
859 u64 deadline = dl_se->deadline;
860
861 WARN_ON(!dl_prio(prio));
862 dl_rq->dl_nr_running++;
863 add_nr_running(rq_of_dl_rq(dl_rq), 1);
864
865 inc_dl_deadline(dl_rq, deadline);
866 inc_dl_migration(dl_se, dl_rq);
867 }
868
869 static inline
870 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
871 {
872 int prio = dl_task_of(dl_se)->prio;
873
874 WARN_ON(!dl_prio(prio));
875 WARN_ON(!dl_rq->dl_nr_running);
876 dl_rq->dl_nr_running--;
877 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
878
879 dec_dl_deadline(dl_rq, dl_se->deadline);
880 dec_dl_migration(dl_se, dl_rq);
881 }
882
883 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
884 {
885 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
886 struct rb_node **link = &dl_rq->rb_root.rb_node;
887 struct rb_node *parent = NULL;
888 struct sched_dl_entity *entry;
889 int leftmost = 1;
890
891 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
892
893 while (*link) {
894 parent = *link;
895 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
896 if (dl_time_before(dl_se->deadline, entry->deadline))
897 link = &parent->rb_left;
898 else {
899 link = &parent->rb_right;
900 leftmost = 0;
901 }
902 }
903
904 if (leftmost)
905 dl_rq->rb_leftmost = &dl_se->rb_node;
906
907 rb_link_node(&dl_se->rb_node, parent, link);
908 rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
909
910 inc_dl_tasks(dl_se, dl_rq);
911 }
912
913 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
914 {
915 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
916
917 if (RB_EMPTY_NODE(&dl_se->rb_node))
918 return;
919
920 if (dl_rq->rb_leftmost == &dl_se->rb_node) {
921 struct rb_node *next_node;
922
923 next_node = rb_next(&dl_se->rb_node);
924 dl_rq->rb_leftmost = next_node;
925 }
926
927 rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
928 RB_CLEAR_NODE(&dl_se->rb_node);
929
930 dec_dl_tasks(dl_se, dl_rq);
931 }
932
933 static void
934 enqueue_dl_entity(struct sched_dl_entity *dl_se,
935 struct sched_dl_entity *pi_se, int flags)
936 {
937 BUG_ON(on_dl_rq(dl_se));
938
939 /*
940 * If this is a wakeup or a new instance, the scheduling
941 * parameters of the task might need updating. Otherwise,
942 * we want a replenishment of its runtime.
943 */
944 if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
945 update_dl_entity(dl_se, pi_se);
946 else if (flags & ENQUEUE_REPLENISH)
947 replenish_dl_entity(dl_se, pi_se);
948
949 __enqueue_dl_entity(dl_se);
950 }
951
952 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
953 {
954 __dequeue_dl_entity(dl_se);
955 }
956
957 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
958 {
959 struct task_struct *pi_task = rt_mutex_get_top_task(p);
960 struct sched_dl_entity *pi_se = &p->dl;
961
962 /*
963 * Use the scheduling parameters of the top pi-waiter
964 * task if we have one and its (relative) deadline is
965 * smaller than ours; otherwise we keep our runtime and
966 * deadline.
967 */
968 if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
969 pi_se = &pi_task->dl;
970 } else if (!dl_prio(p->normal_prio)) {
971 /*
972 * Special case in which we have a !SCHED_DEADLINE task
973 * that is going to be deboosted, but exceeds its
974 * runtime while doing so. No point in replenishing
975 * it, as it's going to return back to its original
976 * scheduling class after this.
977 */
978 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
979 return;
980 }
981
982 /*
983 * If p is throttled, we do nothing. In fact, if it exhausted
984 * its budget it needs a replenishment and, since it now is on
985 * its rq, the bandwidth timer callback (which clearly has not
986 * run yet) will take care of this.
987 */
988 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
989 return;
990
991 enqueue_dl_entity(&p->dl, pi_se, flags);
992
993 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
994 enqueue_pushable_dl_task(rq, p);
995 }
996
997 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
998 {
999 dequeue_dl_entity(&p->dl);
1000 dequeue_pushable_dl_task(rq, p);
1001 }
1002
1003 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1004 {
1005 update_curr_dl(rq);
1006 __dequeue_task_dl(rq, p, flags);
1007 }
1008
1009 /*
1010 * Yield task semantic for -deadline tasks is:
1011 *
1012 * get off from the CPU until our next instance, with
1013 * a new runtime. This is of little use now, since we
1014 * don't have a bandwidth reclaiming mechanism. Anyway,
1015 * bandwidth reclaiming is planned for the future, and
1016 * yield_task_dl will indicate that some spare budget
1017 * is available for other task instances to use.
1018 */
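/*
 * Illustrative userspace usage (see Documentation/scheduler/sched-deadline.txt):
 * a periodic -deadline task typically ends each instance with sched_yield(),
 * giving up whatever budget is left until the next period:
 *
 *	while (!done) {
 *		do_work();
 *		sched_yield();
 *	}
 */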
1019 static void yield_task_dl(struct rq *rq)
1020 {
1021 struct task_struct *p = rq->curr;
1022
1023 /*
1024 * We make the task go to sleep until its current deadline by
1025 * forcing its runtime to zero. This way, update_curr_dl() stops
1026 * it and the bandwidth timer will wake it up and will give it
1027 * new scheduling parameters (thanks to dl_yielded=1).
1028 */
1029 if (p->dl.runtime > 0) {
1030 rq->curr->dl.dl_yielded = 1;
1031 p->dl.runtime = 0;
1032 }
1033 update_curr_dl(rq);
1034 }
1035
1036 #ifdef CONFIG_SMP
1037
1038 static int find_later_rq(struct task_struct *task);
1039
1040 static int
1041 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1042 {
1043 struct task_struct *curr;
1044 struct rq *rq;
1045
1046 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1047 goto out;
1048
1049 rq = cpu_rq(cpu);
1050
1051 rcu_read_lock();
1052 curr = READ_ONCE(rq->curr); /* unlocked access */
1053
1054 /*
1055 * If we are dealing with a -deadline task, we must
1056 * decide where to wake it up.
1057 * If it has a later deadline and the current task
1058 * on this rq can't move (provided the waking task
1059 * can!) we prefer to send it somewhere else. On the
1060 * other hand, if it has a shorter deadline, we
1061 * try to make it stay here, it might be important.
1062 */
1063 if (unlikely(dl_task(curr)) &&
1064 (curr->nr_cpus_allowed < 2 ||
1065 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1066 (p->nr_cpus_allowed > 1)) {
1067 int target = find_later_rq(p);
1068
1069 if (target != -1)
1070 cpu = target;
1071 }
1072 rcu_read_unlock();
1073
1074 out:
1075 return cpu;
1076 }
1077
1078 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1079 {
1080 /*
1081 * Current can't be migrated, useless to reschedule,
1082 * let's hope p can move out.
1083 */
1084 if (rq->curr->nr_cpus_allowed == 1 ||
1085 cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
1086 return;
1087
1088 /*
1089 * p is migratable, so let's not schedule it and
1090 * see if it is pushed or pulled somewhere else.
1091 */
1092 if (p->nr_cpus_allowed != 1 &&
1093 cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
1094 return;
1095
1096 resched_curr(rq);
1097 }
1098
1099 static int pull_dl_task(struct rq *this_rq);
1100
1101 #endif /* CONFIG_SMP */
1102
1103 /*
1104 * Only called when both the current and waking task are -deadline
1105 * tasks.
1106 */
1107 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1108 int flags)
1109 {
1110 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1111 resched_curr(rq);
1112 return;
1113 }
1114
1115 #ifdef CONFIG_SMP
1116 /*
1117 * In the unlikely case current and p have the same deadline
1118 * let us try to decide what's the best thing to do...
1119 */
1120 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1121 !test_tsk_need_resched(rq->curr))
1122 check_preempt_equal_dl(rq, p);
1123 #endif /* CONFIG_SMP */
1124 }
1125
1126 #ifdef CONFIG_SCHED_HRTICK
1127 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1128 {
1129 hrtick_start(rq, p->dl.runtime);
1130 }
1131 #endif
1132
1133 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1134 struct dl_rq *dl_rq)
1135 {
1136 struct rb_node *left = dl_rq->rb_leftmost;
1137
1138 if (!left)
1139 return NULL;
1140
1141 return rb_entry(left, struct sched_dl_entity, rb_node);
1142 }
1143
1144 struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
1145 {
1146 struct sched_dl_entity *dl_se;
1147 struct task_struct *p;
1148 struct dl_rq *dl_rq;
1149
1150 dl_rq = &rq->dl;
1151
1152 if (need_pull_dl_task(rq, prev)) {
1153 pull_dl_task(rq);
1154 /*
1155 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1156 * means a stop task can slip in, in which case we need to
1157 * re-start task selection.
1158 */
1159 if (rq->stop && task_on_rq_queued(rq->stop))
1160 return RETRY_TASK;
1161 }
1162
1163 /*
1164 * When prev is DL, we may throttle it in put_prev_task().
1165 * So, we update time before we check for dl_nr_running.
1166 */
1167 if (prev->sched_class == &dl_sched_class)
1168 update_curr_dl(rq);
1169
1170 if (unlikely(!dl_rq->dl_nr_running))
1171 return NULL;
1172
1173 put_prev_task(rq, prev);
1174
1175 dl_se = pick_next_dl_entity(rq, dl_rq);
1176 BUG_ON(!dl_se);
1177
1178 p = dl_task_of(dl_se);
1179 p->se.exec_start = rq_clock_task(rq);
1180
1181 /* Running task will never be pushed. */
1182 dequeue_pushable_dl_task(rq, p);
1183
1184 #ifdef CONFIG_SCHED_HRTICK
1185 if (hrtick_enabled(rq))
1186 start_hrtick_dl(rq, p);
1187 #endif
1188
1189 set_post_schedule(rq);
1190
1191 return p;
1192 }
1193
1194 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1195 {
1196 update_curr_dl(rq);
1197
1198 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1199 enqueue_pushable_dl_task(rq, p);
1200 }
1201
1202 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1203 {
1204 update_curr_dl(rq);
1205
1206 #ifdef CONFIG_SCHED_HRTICK
1207 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
1208 start_hrtick_dl(rq, p);
1209 #endif
1210 }
1211
1212 static void task_fork_dl(struct task_struct *p)
1213 {
1214 /*
1215 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1216 * sched_fork()
1217 */
1218 }
1219
1220 static void task_dead_dl(struct task_struct *p)
1221 {
1222 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1223 struct dl_rq *dl_rq = dl_rq_of_se(&p->dl);
1224 struct rq *rq = rq_of_dl_rq(dl_rq);
1225
1226 /*
1227 * Since we are TASK_DEAD we won't slip out of the domain!
1228 */
1229 raw_spin_lock_irq(&dl_b->lock);
1230 /* XXX we should retain the bw until 0-lag */
1231 dl_b->total_bw -= p->dl.dl_bw;
1232 raw_spin_unlock_irq(&dl_b->lock);
1233
1234 clear_average_bw(&p->dl, &rq->dl);
1235 }
1236
1237 static void set_curr_task_dl(struct rq *rq)
1238 {
1239 struct task_struct *p = rq->curr;
1240
1241 p->se.exec_start = rq_clock_task(rq);
1242
1243 /* You can't push away the running task */
1244 dequeue_pushable_dl_task(rq, p);
1245 }
1246
1247 #ifdef CONFIG_SMP
1248
1249 /* Only try algorithms three times */
1250 #define DL_MAX_TRIES 3
1251
1252 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1253 {
1254 if (!task_running(rq, p) &&
1255 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1256 return 1;
1257 return 0;
1258 }
1259
1260 /* Returns the second earliest -deadline task, NULL otherwise */
1261 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
1262 {
1263 struct rb_node *next_node = rq->dl.rb_leftmost;
1264 struct sched_dl_entity *dl_se;
1265 struct task_struct *p = NULL;
1266
1267 next_node:
1268 next_node = rb_next(next_node);
1269 if (next_node) {
1270 dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
1271 p = dl_task_of(dl_se);
1272
1273 if (pick_dl_task(rq, p, cpu))
1274 return p;
1275
1276 goto next_node;
1277 }
1278
1279 return NULL;
1280 }
1281
1282 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1283
1284 static int find_later_rq(struct task_struct *task)
1285 {
1286 struct sched_domain *sd;
1287 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1288 int this_cpu = smp_processor_id();
1289 int best_cpu, cpu = task_cpu(task);
1290
1291 /* Make sure the mask is initialized first */
1292 if (unlikely(!later_mask))
1293 return -1;
1294
1295 if (task->nr_cpus_allowed == 1)
1296 return -1;
1297
1298 /*
1299 * We have to consider system topology and task affinity
1300 * first, then we can look for a suitable cpu.
1301 */
1302 cpumask_copy(later_mask, task_rq(task)->rd->span);
1303 cpumask_and(later_mask, later_mask, cpu_active_mask);
1304 cpumask_and(later_mask, later_mask, &task->cpus_allowed);
1305 best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1306 task, later_mask);
1307 if (best_cpu == -1)
1308 return -1;
1309
1310 /*
1311 * If we are here, some target has been found,
1312 * the most suitable of which is cached in best_cpu.
1313 * This is, among the runqueues where the current tasks
1314 * have later deadlines than the task's one, the rq
1315 * with the latest possible one.
1316 *
1317 * Now we check how well this matches with task's
1318 * affinity and system topology.
1319 *
1320 * The last cpu where the task ran is our first
1321 * guess, since it is most likely cache-hot there.
1322 */
1323 if (cpumask_test_cpu(cpu, later_mask))
1324 return cpu;
1325 /*
1326 * Check if this_cpu is to be skipped (i.e., it is
1327 * not in the mask) or not.
1328 */
1329 if (!cpumask_test_cpu(this_cpu, later_mask))
1330 this_cpu = -1;
1331
1332 rcu_read_lock();
1333 for_each_domain(cpu, sd) {
1334 if (sd->flags & SD_WAKE_AFFINE) {
1335
1336 /*
1337 * If possible, preempting this_cpu is
1338 * cheaper than migrating.
1339 */
1340 if (this_cpu != -1 &&
1341 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1342 rcu_read_unlock();
1343 return this_cpu;
1344 }
1345
1346 /*
1347 * Last chance: if best_cpu is valid and is
1348 * in the mask, that becomes our choice.
1349 */
1350 if (best_cpu < nr_cpu_ids &&
1351 cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1352 rcu_read_unlock();
1353 return best_cpu;
1354 }
1355 }
1356 }
1357 rcu_read_unlock();
1358
1359 /*
1360 * At this point, all our guesses failed, we just return
1361 * 'something', and let the caller sort the things out.
1362 */
1363 if (this_cpu != -1)
1364 return this_cpu;
1365
1366 cpu = cpumask_any(later_mask);
1367 if (cpu < nr_cpu_ids)
1368 return cpu;
1369
1370 return -1;
1371 }
1372
1373 /* Locks the rq it finds */
1374 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1375 {
1376 struct rq *later_rq = NULL;
1377 int tries;
1378 int cpu;
1379
1380 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1381 cpu = find_later_rq(task);
1382
1383 if ((cpu == -1) || (cpu == rq->cpu))
1384 break;
1385
1386 later_rq = cpu_rq(cpu);
1387
1388 /* Retry if something changed. */
1389 if (double_lock_balance(rq, later_rq)) {
1390 if (unlikely(task_rq(task) != rq ||
1391 !cpumask_test_cpu(later_rq->cpu,
1392 &task->cpus_allowed) ||
1393 task_running(rq, task) ||
1394 !task_on_rq_queued(task))) {
1395 double_unlock_balance(rq, later_rq);
1396 later_rq = NULL;
1397 break;
1398 }
1399 }
1400
1401 /*
1402 * If the rq we found has no -deadline task, or
1403 * its earliest one has a later deadline than our
1404 * task, the rq is a good one.
1405 */
1406 if (!later_rq->dl.dl_nr_running ||
1407 dl_time_before(task->dl.deadline,
1408 later_rq->dl.earliest_dl.curr))
1409 break;
1410
1411 /* Otherwise we try again. */
1412 double_unlock_balance(rq, later_rq);
1413 later_rq = NULL;
1414 }
1415
1416 return later_rq;
1417 }
1418
1419 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1420 {
1421 struct task_struct *p;
1422
1423 if (!has_pushable_dl_tasks(rq))
1424 return NULL;
1425
1426 p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1427 struct task_struct, pushable_dl_tasks);
1428
1429 BUG_ON(rq->cpu != task_cpu(p));
1430 BUG_ON(task_current(rq, p));
1431 BUG_ON(p->nr_cpus_allowed <= 1);
1432
1433 BUG_ON(!task_on_rq_queued(p));
1434 BUG_ON(!dl_task(p));
1435
1436 return p;
1437 }
1438
1439 /*
1440 * See if the non-running -deadline tasks on this rq
1441 * can be sent to some other CPU where they can preempt
1442 * and start executing.
1443 */
1444 static int push_dl_task(struct rq *rq)
1445 {
1446 struct task_struct *next_task;
1447 struct rq *later_rq;
1448
1449 if (!rq->dl.overloaded)
1450 return 0;
1451
1452 next_task = pick_next_pushable_dl_task(rq);
1453 if (!next_task)
1454 return 0;
1455
1456 retry:
1457 if (unlikely(next_task == rq->curr)) {
1458 WARN_ON(1);
1459 return 0;
1460 }
1461
1462 /*
1463 * If next_task preempts rq->curr, and rq->curr
1464 * can move away, it makes sense to just reschedule
1465 * without going further in pushing next_task.
1466 */
1467 if (dl_task(rq->curr) &&
1468 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1469 rq->curr->nr_cpus_allowed > 1) {
1470 resched_curr(rq);
1471 return 0;
1472 }
1473
1474 /* We might release rq lock */
1475 get_task_struct(next_task);
1476
1477 /* Will lock the rq it'll find */
1478 later_rq = find_lock_later_rq(next_task, rq);
1479 if (!later_rq) {
1480 struct task_struct *task;
1481
1482 /*
1483 * We must check all this again, since
1484 * find_lock_later_rq releases rq->lock and it is
1485 * then possible that next_task has migrated.
1486 */
1487 task = pick_next_pushable_dl_task(rq);
1488 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1489 /*
1490 * The task is still there. We don't try
1491 * again, some other cpu will pull it when ready.
1492 */
1493 dequeue_pushable_dl_task(rq, next_task);
1494 goto out;
1495 }
1496
1497 if (!task)
1498 /* No more tasks */
1499 goto out;
1500
1501 put_task_struct(next_task);
1502 next_task = task;
1503 goto retry;
1504 }
1505
1506 deactivate_task(rq, next_task, 0);
1507 clear_average_bw(&next_task->dl, &rq->dl);
1508 set_task_cpu(next_task, later_rq->cpu);
1509 add_average_bw(&next_task->dl, &later_rq->dl);
1510 activate_task(later_rq, next_task, 0);
1511
1512 resched_curr(later_rq);
1513
1514 double_unlock_balance(rq, later_rq);
1515
1516 out:
1517 put_task_struct(next_task);
1518
1519 return 1;
1520 }
1521
1522 static void push_dl_tasks(struct rq *rq)
1523 {
1524 /* Terminates as it moves a -deadline task */
1525 while (push_dl_task(rq))
1526 ;
1527 }
1528
1529 static int pull_dl_task(struct rq *this_rq)
1530 {
1531 int this_cpu = this_rq->cpu, ret = 0, cpu;
1532 struct task_struct *p;
1533 struct rq *src_rq;
1534 u64 dmin = LONG_MAX;
1535
1536 if (likely(!dl_overloaded(this_rq)))
1537 return 0;
1538
1539 /*
1540 * Match the barrier from dl_set_overload(); this guarantees that if we
1541 * see overloaded we must also see the dlo_mask bit.
1542 */
1543 smp_rmb();
1544
1545 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1546 if (this_cpu == cpu)
1547 continue;
1548
1549 src_rq = cpu_rq(cpu);
1550
1551 /*
1552 * It looks racy, and it is! However, as in sched_rt.c,
1553 * we are fine with this.
1554 */
1555 if (this_rq->dl.dl_nr_running &&
1556 dl_time_before(this_rq->dl.earliest_dl.curr,
1557 src_rq->dl.earliest_dl.next))
1558 continue;
1559
1560 /* Might drop this_rq->lock */
1561 double_lock_balance(this_rq, src_rq);
1562
1563 /*
1564 * If there are no more pullable tasks on the
1565 * rq, we're done with it.
1566 */
1567 if (src_rq->dl.dl_nr_running <= 1)
1568 goto skip;
1569
1570 p = pick_next_earliest_dl_task(src_rq, this_cpu);
1571
1572 /*
1573 * We found a task to be pulled if:
1574 * - it preempts our current (if there's one),
1575 * - it will preempt the last one we pulled (if any).
1576 */
1577 if (p && dl_time_before(p->dl.deadline, dmin) &&
1578 (!this_rq->dl.dl_nr_running ||
1579 dl_time_before(p->dl.deadline,
1580 this_rq->dl.earliest_dl.curr))) {
1581 WARN_ON(p == src_rq->curr);
1582 WARN_ON(!task_on_rq_queued(p));
1583
1584 /*
1585 * Then we pull iff p has actually an earlier
1586 * deadline than the current task of its runqueue.
1587 */
1588 if (dl_time_before(p->dl.deadline,
1589 src_rq->curr->dl.deadline))
1590 goto skip;
1591
1592 ret = 1;
1593
1594 deactivate_task(src_rq, p, 0);
1595 clear_average_bw(&p->dl, &src_rq->dl);
1596 set_task_cpu(p, this_cpu);
1597 add_average_bw(&p->dl, &this_rq->dl);
1598 activate_task(this_rq, p, 0);
1599 dmin = p->dl.deadline;
1600
1601 /* Is there any other task even earlier? */
1602 }
1603 skip:
1604 double_unlock_balance(this_rq, src_rq);
1605 }
1606
1607 return ret;
1608 }
1609
1610 static void post_schedule_dl(struct rq *rq)
1611 {
1612 push_dl_tasks(rq);
1613 }
1614
1615 /*
1616 * Since the task is not running and a reschedule is not going to happen
1617 * anytime soon on its runqueue, we try pushing it away now.
1618 */
1619 static void task_woken_dl(struct rq *rq, struct task_struct *p)
1620 {
1621 if (!task_running(rq, p) &&
1622 !test_tsk_need_resched(rq->curr) &&
1623 has_pushable_dl_tasks(rq) &&
1624 p->nr_cpus_allowed > 1 &&
1625 dl_task(rq->curr) &&
1626 (rq->curr->nr_cpus_allowed < 2 ||
1627 dl_entity_preempt(&rq->curr->dl, &p->dl))) {
1628 push_dl_tasks(rq);
1629 }
1630 }
1631
1632 static void set_cpus_allowed_dl(struct task_struct *p,
1633 const struct cpumask *new_mask)
1634 {
1635 struct rq *rq;
1636 int weight;
1637
1638 BUG_ON(!dl_task(p));
1639
1640 /*
1641 * Update only if the task is actually running (i.e.,
1642 * it is on the rq AND it is not throttled).
1643 */
1644 if (!on_dl_rq(&p->dl))
1645 return;
1646
1647 weight = cpumask_weight(new_mask);
1648
1649 /*
1650 * Only update if the task actually changes whether or not it
1651 * can migrate.
1652 */
1653 if ((p->nr_cpus_allowed > 1) == (weight > 1))
1654 return;
1655
1656 rq = task_rq(p);
1657
1658 /*
1659 * The process used to be able to migrate OR it can now migrate
1660 */
1661 if (weight <= 1) {
1662 if (!task_current(rq, p))
1663 dequeue_pushable_dl_task(rq, p);
1664 BUG_ON(!rq->dl.dl_nr_migratory);
1665 rq->dl.dl_nr_migratory--;
1666 } else {
1667 if (!task_current(rq, p))
1668 enqueue_pushable_dl_task(rq, p);
1669 rq->dl.dl_nr_migratory++;
1670 }
1671
1672 update_dl_migration(&rq->dl);
1673 }
1674
1675 /* Assumes rq->lock is held */
1676 static void rq_online_dl(struct rq *rq)
1677 {
1678 if (rq->dl.overloaded)
1679 dl_set_overload(rq);
1680
1681 if (rq->dl.dl_nr_running > 0)
1682 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
1683 }
1684
1685 /* Assumes rq->lock is held */
1686 static void rq_offline_dl(struct rq *rq)
1687 {
1688 if (rq->dl.overloaded)
1689 dl_clear_overload(rq);
1690
1691 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
1692 }
1693
1694 void init_sched_dl_class(void)
1695 {
1696 unsigned int i;
1697
1698 for_each_possible_cpu(i)
1699 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1700 GFP_KERNEL, cpu_to_node(i));
1701 }
1702
1703 #endif /* CONFIG_SMP */
1704
1705 static void switched_from_dl(struct rq *rq, struct task_struct *p)
1706 {
1707 /*
1708 * Start the deadline timer; if we switch back to dl before this we'll
1709 * continue consuming our current CBS slice. If we stay outside of
1710 * SCHED_DEADLINE until the deadline passes, the timer will reset the
1711 * task.
1712 */
1713 if (!start_dl_timer(p))
1714 __dl_clear_params(p);
1715
1716 clear_average_bw(&p->dl, &rq->dl);
1717
1718 /*
1719 * Since this might be the only -deadline task on the rq,
1720 * this is the right place to try to pull some other one
1721 * from an overloaded cpu, if any.
1722 */
1723 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
1724 return;
1725
1726 if (pull_dl_task(rq))
1727 resched_curr(rq);
1728 }
1729
1730 /*
1731 * When switching to -deadline, we may overload the rq, then
1732 * we try to push someone off, if possible.
1733 */
1734 static void switched_to_dl(struct rq *rq, struct task_struct *p)
1735 {
1736 int check_resched = 1;
1737
1738 /*
1739 * If p is throttled, don't consider the possibility
1740 * of preempting rq->curr, the check will be done right
1741 * after its runtime gets replenished.
1742 */
1743 if (unlikely(p->dl.dl_throttled))
1744 return;
1745
1746 if (task_on_rq_queued(p) && rq->curr != p) {
1747 #ifdef CONFIG_SMP
1748 if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
1749 /* Only reschedule if pushing failed */
1750 check_resched = 0;
1751 #endif /* CONFIG_SMP */
1752 if (check_resched) {
1753 if (dl_task(rq->curr))
1754 check_preempt_curr_dl(rq, p, 0);
1755 else
1756 resched_curr(rq);
1757 }
1758 }
1759 }
1760
1761 /*
1762 * If the scheduling parameters of a -deadline task changed,
1763 * a push or pull operation might be needed.
1764 */
1765 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1766 int oldprio)
1767 {
1768 if (task_on_rq_queued(p) || rq->curr == p) {
1769 #ifdef CONFIG_SMP
1770 /*
1771 * This might be too much, but unfortunately
1772 * we don't have the old deadline value, and
1773 * we can't argue if the task is increasing
1774 * or lowering its prio, so...
1775 */
1776 if (!rq->dl.overloaded)
1777 pull_dl_task(rq);
1778
1779 /*
1780 * If we now have an earlier deadline task than p,
1781 * then reschedule, provided p is still on this
1782 * runqueue.
1783 */
1784 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
1785 rq->curr == p)
1786 resched_curr(rq);
1787 #else
1788 /*
1789 * Again, we don't know if p has an earlier
1790 * or later deadline, so let's blindly set a
1791 * (maybe not needed) rescheduling point.
1792 */
1793 resched_curr(rq);
1794 #endif /* CONFIG_SMP */
1795 } else
1796 switched_to_dl(rq, p);
1797 }
1798
1799 const struct sched_class dl_sched_class = {
1800 .next = &rt_sched_class,
1801 .enqueue_task = enqueue_task_dl,
1802 .dequeue_task = dequeue_task_dl,
1803 .yield_task = yield_task_dl,
1804
1805 .check_preempt_curr = check_preempt_curr_dl,
1806
1807 .pick_next_task = pick_next_task_dl,
1808 .put_prev_task = put_prev_task_dl,
1809
1810 #ifdef CONFIG_SMP
1811 .select_task_rq = select_task_rq_dl,
1812 .set_cpus_allowed = set_cpus_allowed_dl,
1813 .rq_online = rq_online_dl,
1814 .rq_offline = rq_offline_dl,
1815 .post_schedule = post_schedule_dl,
1816 .task_woken = task_woken_dl,
1817 #endif
1818
1819 .set_curr_task = set_curr_task_dl,
1820 .task_tick = task_tick_dl,
1821 .task_fork = task_fork_dl,
1822 .task_dead = task_dead_dl,
1823
1824 .prio_changed = prio_changed_dl,
1825 .switched_from = switched_from_dl,
1826 .switched_to = switched_to_dl,
1827
1828 .update_curr = update_curr_dl,
1829 };
1830