1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Deadline Scheduling Class (SCHED_DEADLINE)
4  *
5  * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
6  *
7  * Tasks that periodically execute their instances for less than their
8  * runtime won't miss any of their deadlines.
9  * Tasks that are not periodic or sporadic, or that try to execute more
10  * than their reserved bandwidth, will be slowed down (and may potentially
11  * miss some of their deadlines), and won't affect any other task.
12  *
13  * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
14  *                    Juri Lelli <juri.lelli@gmail.com>,
15  *                    Michael Trimarchi <michael@amarulasolutions.com>,
16  *                    Fabio Checconi <fchecconi@gmail.com>
17  */
18 #include "sched.h"
19 
20 #include <linux/slab.h>
21 #include <uapi/linux/sched/types.h>
22 
23 #include "walt.h"
24 
25 struct dl_bandwidth def_dl_bandwidth;
26 
27 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
28 {
29 	return container_of(dl_se, struct task_struct, dl);
30 }
31 
32 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
33 {
34 	return container_of(dl_rq, struct rq, dl);
35 }
36 
37 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
38 {
39 	struct task_struct *p = dl_task_of(dl_se);
40 	struct rq *rq = task_rq(p);
41 
42 	return &rq->dl;
43 }
44 
45 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
46 {
47 	return !RB_EMPTY_NODE(&dl_se->rb_node);
48 }
49 
50 #ifdef CONFIG_SMP
51 static inline struct dl_bw *dl_bw_of(int i)
52 {
53 	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
54 			 "sched RCU must be held");
55 	return &cpu_rq(i)->rd->dl_bw;
56 }
57 
58 static inline int dl_bw_cpus(int i)
59 {
60 	struct root_domain *rd = cpu_rq(i)->rd;
61 	int cpus = 0;
62 
63 	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
64 			 "sched RCU must be held");
65 	for_each_cpu_and(i, rd->span, cpu_active_mask)
66 		cpus++;
67 
68 	return cpus;
69 }
70 #else
71 static inline struct dl_bw *dl_bw_of(int i)
72 {
73 	return &cpu_rq(i)->dl.dl_bw;
74 }
75 
76 static inline int dl_bw_cpus(int i)
77 {
78 	return 1;
79 }
80 #endif
81 
82 static inline
83 void add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
84 {
85 	u64 old = dl_rq->running_bw;
86 
87 	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
88 	dl_rq->running_bw += dl_bw;
89 	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
90 	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
91 }
92 
93 static inline
94 void sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
95 {
96 	u64 old = dl_rq->running_bw;
97 
98 	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
99 	dl_rq->running_bw -= dl_bw;
100 	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
101 	if (dl_rq->running_bw > old)
102 		dl_rq->running_bw = 0;
103 }
104 
105 static inline
106 void add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
107 {
108 	u64 old = dl_rq->this_bw;
109 
110 	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
111 	dl_rq->this_bw += dl_bw;
112 	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
113 }
114 
115 static inline
116 void sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
117 {
118 	u64 old = dl_rq->this_bw;
119 
120 	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
121 	dl_rq->this_bw -= dl_bw;
122 	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
123 	if (dl_rq->this_bw > old)
124 		dl_rq->this_bw = 0;
125 	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
126 }
127 
128 void dl_change_utilization(struct task_struct *p, u64 new_bw)
129 {
130 	struct rq *rq;
131 
132 	if (task_on_rq_queued(p))
133 		return;
134 
135 	rq = task_rq(p);
136 	if (p->dl.dl_non_contending) {
137 		sub_running_bw(p->dl.dl_bw, &rq->dl);
138 		p->dl.dl_non_contending = 0;
139 		/*
140 		 * If the timer handler is currently running and the
141 		 * timer cannot be cancelled, inactive_task_timer()
142 		 * will see that dl_non_contending is not set, and
143 		 * will not touch the rq's active utilization,
144 		 * so we are still safe.
145 		 */
146 		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
147 			put_task_struct(p);
148 	}
149 	sub_rq_bw(p->dl.dl_bw, &rq->dl);
150 	add_rq_bw(new_bw, &rq->dl);
151 }
152 
153 /*
154  * The utilization of a task cannot be immediately removed from
155  * the rq active utilization (running_bw) when the task blocks.
156  * Instead, we have to wait for the so called "0-lag time".
157  *
158  * If a task blocks before the "0-lag time", a timer (the inactive
159  * timer) is armed, and running_bw is decreased when the timer
160  * fires.
161  *
162  * If the task wakes up again before the inactive timer fires,
163  * the timer is cancelled, whereas if the task wakes up after the
164  * inactive timer fired (and running_bw has been decreased) the
165  * task's utilization has to be added to running_bw again.
166  * A flag in the deadline scheduling entity (dl_non_contending)
167  * is used to avoid race conditions between the inactive timer handler
168  * and task wakeups.
169  *
170  * The following diagram shows how running_bw is updated. A task is
171  * "ACTIVE" when its utilization contributes to running_bw; an
172  * "ACTIVE contending" task is in the TASK_RUNNING state, while an
173  * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
174  * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
175  * time already passed, which does not contribute to running_bw anymore.
176  *                              +------------------+
177  *             wakeup           |    ACTIVE        |
178  *          +------------------>+   contending     |
179  *          | add_running_bw    |                  |
180  *          |                   +----+------+------+
181  *          |                        |      ^
182  *          |                dequeue |      |
183  * +--------+-------+                |      |
184  * |                |   t >= 0-lag   |      | wakeup
185  * |    INACTIVE    |<---------------+      |
186  * |                | sub_running_bw |      |
187  * +--------+-------+                |      |
188  *          ^                        |      |
189  *          |              t < 0-lag |      |
190  *          |                        |      |
191  *          |                        V      |
192  *          |                   +----+------+------+
193  *          | sub_running_bw    |    ACTIVE        |
194  *          +-------------------+                  |
195  *            inactive timer    |  non contending  |
196  *            fired             +------------------+
197  *
198  * The task_non_contending() function is invoked when a task
199  * blocks, and checks if the 0-lag time already passed or
200  * not (in the first case, it directly updates running_bw;
201  * in the second case, it arms the inactive timer).
202  *
203  * The task_contending() function is invoked when a task wakes
204  * up, and checks if the task is still in the "ACTIVE non contending"
205  * state or not (in the second case, it updates running_bw).
206  */
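/*
 * Illustrative example (not part of the original source; the numbers are
 * made up): a task with dl_runtime = 10ms and dl_period = 100ms has its
 * absolute deadline at t = 150ms and blocks at t = 80ms with 4ms of
 * runtime left. Its "0-lag time" is
 *
 *   deadline - runtime * dl_period / dl_runtime = 150ms - 4 * 100/10 ms
 *                                               = 110ms,
 *
 * so task_non_contending() below arms the inactive timer 30ms in the
 * future; if the task wakes up before t = 110ms the timer is cancelled,
 * otherwise running_bw is decreased when it fires.
 */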
207 static void task_non_contending(struct task_struct *p)
208 {
209 	struct sched_dl_entity *dl_se = &p->dl;
210 	struct hrtimer *timer = &dl_se->inactive_timer;
211 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
212 	struct rq *rq = rq_of_dl_rq(dl_rq);
213 	s64 zerolag_time;
214 
215 	/*
216 	 * If this is a non-deadline task that has been boosted,
217 	 * do nothing
218 	 */
219 	if (dl_se->dl_runtime == 0)
220 		return;
221 
222 	WARN_ON(dl_se->dl_non_contending);
223 
224 	zerolag_time = dl_se->deadline -
225 		 div64_long((dl_se->runtime * dl_se->dl_period),
226 			dl_se->dl_runtime);
227 
228 	/*
229 	 * Using relative times instead of the absolute "0-lag time"
230 	 * allows to simplify the code
231 	 */
232 	zerolag_time -= rq_clock(rq);
233 
234 	/*
235 	 * If the "0-lag time" already passed, decrease the active
236 	 * utilization now, instead of starting a timer
237 	 */
238 	if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
239 		if (dl_task(p))
240 			sub_running_bw(dl_se->dl_bw, dl_rq);
241 		if (!dl_task(p) || p->state == TASK_DEAD) {
242 			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
243 
244 			if (p->state == TASK_DEAD)
245 				sub_rq_bw(p->dl.dl_bw, &rq->dl);
246 			raw_spin_lock(&dl_b->lock);
247 			__dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
248 			__dl_clear_params(p);
249 			raw_spin_unlock(&dl_b->lock);
250 		}
251 
252 		return;
253 	}
254 
255 	dl_se->dl_non_contending = 1;
256 	get_task_struct(p);
257 	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL);
258 }
259 
260 static void task_contending(struct sched_dl_entity *dl_se, int flags)
261 {
262 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
263 
264 	/*
265 	 * If this is a non-deadline task that has been boosted,
266 	 * do nothing
267 	 */
268 	if (dl_se->dl_runtime == 0)
269 		return;
270 
271 	if (flags & ENQUEUE_MIGRATED)
272 		add_rq_bw(dl_se->dl_bw, dl_rq);
273 
274 	if (dl_se->dl_non_contending) {
275 		dl_se->dl_non_contending = 0;
276 		/*
277 		 * If the timer handler is currently running and the
278 		 * timer cannot be cancelled, inactive_task_timer()
279 		 * will see that dl_non_contending is not set, and
280 		 * will not touch the rq's active utilization,
281 		 * so we are still safe.
282 		 */
283 		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
284 			put_task_struct(dl_task_of(dl_se));
285 	} else {
286 		/*
287 		 * Since "dl_non_contending" is not set, the
288 		 * task's utilization has already been removed from
289 		 * active utilization (either when the task blocked,
290 		 * or when the "inactive timer" fired).
291 		 * So, add it back.
292 		 */
293 		add_running_bw(dl_se->dl_bw, dl_rq);
294 	}
295 }
296 
297 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
298 {
299 	struct sched_dl_entity *dl_se = &p->dl;
300 
301 	return dl_rq->root.rb_leftmost == &dl_se->rb_node;
302 }
303 
304 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
305 {
306 	raw_spin_lock_init(&dl_b->dl_runtime_lock);
307 	dl_b->dl_period = period;
308 	dl_b->dl_runtime = runtime;
309 }
310 
311 void init_dl_bw(struct dl_bw *dl_b)
312 {
313 	raw_spin_lock_init(&dl_b->lock);
314 	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
315 	if (global_rt_runtime() == RUNTIME_INF)
316 		dl_b->bw = -1;
317 	else
318 		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
319 	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
320 	dl_b->total_bw = 0;
321 }
322 
323 void init_dl_rq(struct dl_rq *dl_rq)
324 {
325 	dl_rq->root = RB_ROOT_CACHED;
326 
327 #ifdef CONFIG_SMP
328 	/* zero means no -deadline tasks */
329 	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
330 
331 	dl_rq->dl_nr_migratory = 0;
332 	dl_rq->overloaded = 0;
333 	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
334 #else
335 	init_dl_bw(&dl_rq->dl_bw);
336 #endif
337 
338 	dl_rq->running_bw = 0;
339 	dl_rq->this_bw = 0;
340 	init_dl_rq_bw_ratio(dl_rq);
341 }
342 
343 #ifdef CONFIG_SMP
344 
345 static inline int dl_overloaded(struct rq *rq)
346 {
347 	return atomic_read(&rq->rd->dlo_count);
348 }
349 
350 static inline void dl_set_overload(struct rq *rq)
351 {
352 	if (!rq->online)
353 		return;
354 
355 	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
356 	/*
357 	 * Must be visible before the overload count is
358 	 * set (as in sched_rt.c).
359 	 *
360 	 * Matched by the barrier in pull_dl_task().
361 	 */
362 	smp_wmb();
363 	atomic_inc(&rq->rd->dlo_count);
364 }
365 
366 static inline void dl_clear_overload(struct rq *rq)
367 {
368 	if (!rq->online)
369 		return;
370 
371 	atomic_dec(&rq->rd->dlo_count);
372 	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
373 }
374 
375 static void update_dl_migration(struct dl_rq *dl_rq)
376 {
377 	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
378 		if (!dl_rq->overloaded) {
379 			dl_set_overload(rq_of_dl_rq(dl_rq));
380 			dl_rq->overloaded = 1;
381 		}
382 	} else if (dl_rq->overloaded) {
383 		dl_clear_overload(rq_of_dl_rq(dl_rq));
384 		dl_rq->overloaded = 0;
385 	}
386 }
387 
388 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
389 {
390 	struct task_struct *p = dl_task_of(dl_se);
391 
392 	if (p->nr_cpus_allowed > 1)
393 		dl_rq->dl_nr_migratory++;
394 
395 	update_dl_migration(dl_rq);
396 }
397 
398 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
399 {
400 	struct task_struct *p = dl_task_of(dl_se);
401 
402 	if (p->nr_cpus_allowed > 1)
403 		dl_rq->dl_nr_migratory--;
404 
405 	update_dl_migration(dl_rq);
406 }
407 
408 /*
409  * The list of pushable -deadline tasks is not a plist, like in
410  * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
411  */
412 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
413 {
414 	struct dl_rq *dl_rq = &rq->dl;
415 	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
416 	struct rb_node *parent = NULL;
417 	struct task_struct *entry;
418 	bool leftmost = true;
419 
420 	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
421 
422 	while (*link) {
423 		parent = *link;
424 		entry = rb_entry(parent, struct task_struct,
425 				 pushable_dl_tasks);
426 		if (dl_entity_preempt(&p->dl, &entry->dl))
427 			link = &parent->rb_left;
428 		else {
429 			link = &parent->rb_right;
430 			leftmost = false;
431 		}
432 	}
433 
434 	if (leftmost)
435 		dl_rq->earliest_dl.next = p->dl.deadline;
436 
437 	rb_link_node(&p->pushable_dl_tasks, parent, link);
438 	rb_insert_color_cached(&p->pushable_dl_tasks,
439 			       &dl_rq->pushable_dl_tasks_root, leftmost);
440 }
441 
442 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
443 {
444 	struct dl_rq *dl_rq = &rq->dl;
445 
446 	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
447 		return;
448 
449 	if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
450 		struct rb_node *next_node;
451 
452 		next_node = rb_next(&p->pushable_dl_tasks);
453 		if (next_node) {
454 			dl_rq->earliest_dl.next = rb_entry(next_node,
455 				struct task_struct, pushable_dl_tasks)->dl.deadline;
456 		}
457 	}
458 
459 	rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
460 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
461 }
462 
463 static inline int has_pushable_dl_tasks(struct rq *rq)
464 {
465 	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
466 }
467 
468 static int push_dl_task(struct rq *rq);
469 
470 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
471 {
472 	return dl_task(prev);
473 }
474 
475 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
476 static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
477 
478 static void push_dl_tasks(struct rq *);
479 static void pull_dl_task(struct rq *);
480 
481 static inline void queue_push_tasks(struct rq *rq)
482 {
483 	if (!has_pushable_dl_tasks(rq))
484 		return;
485 
486 	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
487 }
488 
489 static inline void queue_pull_task(struct rq *rq)
490 {
491 	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
492 }
493 
494 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
495 
496 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
497 {
498 	struct rq *later_rq = NULL;
499 
500 	later_rq = find_lock_later_rq(p, rq);
501 	if (!later_rq) {
502 		int cpu;
503 
504 		/*
505 		 * If we cannot preempt any rq, fall back to pick any
506 		 * online cpu.
507 		 */
508 		cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
509 		if (cpu >= nr_cpu_ids) {
510 			/*
511 			 * Failed to find any suitable cpu.
512 			 * The task will never come back!
513 			 */
514 			BUG_ON(dl_bandwidth_enabled());
515 
516 			/*
517 			 * If admission control is disabled we
518 			 * try a little harder to let the task
519 			 * run.
520 			 */
521 			cpu = cpumask_any(cpu_active_mask);
522 		}
523 		later_rq = cpu_rq(cpu);
524 		double_lock_balance(rq, later_rq);
525 	}
526 
527 	set_task_cpu(p, later_rq->cpu);
528 	double_unlock_balance(later_rq, rq);
529 
530 	return later_rq;
531 }
532 
533 #else
534 
535 static inline
536 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
537 {
538 }
539 
540 static inline
541 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
542 {
543 }
544 
545 static inline
546 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
547 {
548 }
549 
550 static inline
551 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
552 {
553 }
554 
555 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
556 {
557 	return false;
558 }
559 
560 static inline void pull_dl_task(struct rq *rq)
561 {
562 }
563 
564 static inline void queue_push_tasks(struct rq *rq)
565 {
566 }
567 
568 static inline void queue_pull_task(struct rq *rq)
569 {
570 }
571 #endif /* CONFIG_SMP */
572 
573 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
574 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
575 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
576 				  int flags);
577 
578 /*
579  * We are being explicitly informed that a new instance is starting,
580  * and this means that:
581  *  - the absolute deadline of the entity has to be placed at
582  *    current time + relative deadline;
583  *  - the runtime of the entity has to be set to the maximum value.
584  *
585  * The capability of specifying such an event is useful whenever a -deadline
586  * entity wants to (try to!) synchronize its behaviour with the scheduler's
587  * one, and to (try to!) reconcile itself with its own scheduling
588  * parameters.
589  */
590 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
591 {
592 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
593 	struct rq *rq = rq_of_dl_rq(dl_rq);
594 
595 	WARN_ON(dl_se->dl_boosted);
596 	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
597 
598 	/*
599 	 * We are racing with the deadline timer. So, do nothing because
600 	 * the deadline timer handler will take care of properly recharging
601 	 * the runtime and postponing the deadline
602 	 */
603 	if (dl_se->dl_throttled)
604 		return;
605 
606 	/*
607 	 * We use the regular wall clock time to set deadlines in the
608 	 * future; in fact, we must consider execution overheads (time
609 	 * spent on hardirq context, etc.).
610 	 */
611 	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
612 	dl_se->runtime = dl_se->dl_runtime;
613 }
614 
615 /*
616  * Pure Earliest Deadline First (EDF) scheduling does not deal with the
617  * possibility of an entity lasting more than what it declared, and thus
618  * exhausting its runtime.
619  *
620  * Here we are interested in making runtime overrun possible, but we do
621  * not want an entity which is misbehaving to affect the scheduling of all
622  * other entities.
623  * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
624  * is used, in order to confine each entity within its own bandwidth.
625  *
626  * This function deals exactly with that, and ensures that when the runtime
627  * of an entity is replenished, its deadline is also postponed. That ensures
628  * the overrunning entity can't interfere with other entities in the system and
629  * can't make them miss their deadlines. Reasons why this kind of overrun
630  * could happen are, typically, an entity voluntarily trying to exceed its
631  * runtime, or having underestimated it during sched_setattr().
632  */
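/*
 * Illustrative example (not part of the original source; the numbers are
 * made up): with dl_runtime = 5ms and dl_period = 20ms, an entity that
 * overran down to runtime = -7ms gets two replenishments from the loop
 * below: its deadline is pushed 40ms into the future and its runtime
 * becomes 3ms. The overrun is thus paid back by running later, without
 * stealing bandwidth from other entities.
 */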
633 static void replenish_dl_entity(struct sched_dl_entity *dl_se,
634 				struct sched_dl_entity *pi_se)
635 {
636 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
637 	struct rq *rq = rq_of_dl_rq(dl_rq);
638 
639 	BUG_ON(pi_se->dl_runtime <= 0);
640 
641 	/*
642 	 * This could be the case for a !-dl task that is boosted.
643 	 * Just go with full inherited parameters.
644 	 */
645 	if (dl_se->dl_deadline == 0) {
646 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
647 		dl_se->runtime = pi_se->dl_runtime;
648 	}
649 
650 	if (dl_se->dl_yielded && dl_se->runtime > 0)
651 		dl_se->runtime = 0;
652 
653 	/*
654 	 * We keep moving the deadline away until we get some
655 	 * available runtime for the entity. This ensures correct
656 	 * handling of situations where the runtime overrun is
657 	 * arbitrarily large.
658 	 */
659 	while (dl_se->runtime <= 0) {
660 		dl_se->deadline += pi_se->dl_period;
661 		dl_se->runtime += pi_se->dl_runtime;
662 	}
663 
664 	/*
665 	 * At this point, the deadline really should be "in
666 	 * the future" with respect to rq->clock. If it's
667 	 * not, we are, for some reason, lagging too much!
668 	 * Anyway, after having warned userspace about that,
669 	 * we still try to keep things running by
670 	 * resetting the deadline and the budget of the
671 	 * entity.
672 	 */
673 	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
674 		printk_deferred_once("sched: DL replenish lagged too much\n");
675 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
676 		dl_se->runtime = pi_se->dl_runtime;
677 	}
678 
679 	if (dl_se->dl_yielded)
680 		dl_se->dl_yielded = 0;
681 	if (dl_se->dl_throttled)
682 		dl_se->dl_throttled = 0;
683 }
684 
685 /*
686  * Here we check if --at time t-- an entity (which is probably being
687  * [re]activated or, in general, enqueued) can use its remaining runtime
688  * and its current deadline _without_ exceeding the bandwidth it is
689  * assigned (function returns true if it can't). We are in fact applying
690  * one of the CBS rules: when a task wakes up, if the residual runtime
691  * over residual deadline fits within the allocated bandwidth, then we
692  * can keep the current (absolute) deadline and residual budget without
693  * disrupting the schedulability of the system. Otherwise, we should
694  * refill the runtime and set the deadline a period in the future,
695  * because keeping the current (absolute) deadline of the task would
696  * result in breaking guarantees promised to other tasks (refer to
697  * Documentation/scheduler/sched-deadline.txt for more information).
698  *
699  * This function returns true if:
700  *
701  *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
702  *
703  * IOW we can't recycle current parameters.
704  *
705  * Notice that the bandwidth check is done against the deadline. For
706  * tasks with deadline equal to period this is the same as using
707  * dl_period instead of dl_deadline in the equation above.
708  */
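/*
 * Illustrative example (not part of the original source; the numbers are
 * made up): a task with dl_runtime = 10ms and dl_deadline = 100ms
 * (bandwidth 0.1) wakes up 50ms before its current absolute deadline.
 * With 6ms of runtime left, 6/50 = 0.12 > 0.1, so this check triggers
 * and the parameters are refilled; with only 4ms left, 4/50 = 0.08 <= 0.1
 * and the current deadline and runtime can safely be kept.
 */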
709 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
710 			       struct sched_dl_entity *pi_se, u64 t)
711 {
712 	u64 left, right;
713 
714 	/*
715 	 * left and right are the two sides of the equation above,
716 	 * after a bit of shuffling to use multiplications instead
717 	 * of divisions.
718 	 *
719 	 * Note that none of the time values involved in the two
720 	 * multiplications are absolute: dl_deadline and dl_runtime
721 	 * are the relative deadline and the maximum runtime of each
722 	 * instance, runtime is the runtime left for the last instance
723 	 * and (deadline - t), since t is rq->clock, is the time left
724 	 * to the (absolute) deadline. Even if overflowing the u64 type
725 	 * is very unlikely to occur in both cases, here we scale down
726 	 * as we want to avoid that risk at all. Scaling down by 10
727 	 * means that we reduce granularity to 1us. We are fine with it,
728 	 * since this is only a true/false check and, anyway, thinking
729 	 * of anything below microseconds resolution is actually fiction
730 	 * (but still we want to give the user that illusion >;).
731 	 */
732 	left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
733 	right = ((dl_se->deadline - t) >> DL_SCALE) *
734 		(pi_se->dl_runtime >> DL_SCALE);
735 
736 	return dl_time_before(right, left);
737 }
738 
739 /*
740  * Revised wakeup rule [1]: For self-suspending tasks, rather than
741  * re-initializing the task's runtime and deadline, the revised wakeup
742  * rule adjusts the task's runtime to avoid the task overrunning its
743  * density.
744  *
745  * Reasoning: a task may overrun the density if:
746  *    runtime / (deadline - t) > dl_runtime / dl_deadline
747  *
748  * Therefore, runtime can be adjusted to:
749  *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
750  *
751  * This way, runtime will be equal to the maximum density
752  * the task can use without breaking any rule.
753  *
754  * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
755  * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
756  */
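/*
 * Illustrative example (not part of the original source; the numbers are
 * made up): a task with dl_runtime = 10ms and dl_deadline = 100ms
 * (density 0.1) wakes up 50ms before its current deadline. The rule
 * above trims its remaining runtime to 0.1 * 50ms = 5ms, whatever budget
 * was left, so the task cannot exceed its declared density before this
 * deadline.
 */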
757 static void
758 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
759 {
760 	u64 laxity = dl_se->deadline - rq_clock(rq);
761 
762 	/*
763 	 * If the task has deadline < period, and the deadline is in the past,
764 	 * it should already be throttled before this check.
765 	 *
766 	 * See update_dl_entity() comments for further details.
767 	 */
768 	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
769 
770 	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
771 }
772 
773 /*
774  * Regarding the deadline, a task with implicit deadline has a relative
775  * deadline == relative period. A task with constrained deadline has a
776  * relative deadline <= relative period.
777  *
778  * We support constrained deadline tasks. However, there are some restrictions
779  * applied only for tasks which do not have an implicit deadline. See
780  * update_dl_entity() to know more about such restrictions.
781  *
782  * dl_is_implicit() returns true if the task has an implicit deadline.
783  */
784 static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
785 {
786 	return dl_se->dl_deadline == dl_se->dl_period;
787 }
788 
789 /*
790  * When a deadline entity is placed in the runqueue, its runtime and deadline
791  * might need to be updated. This is done by a CBS wake up rule. There are two
792  * different rules: 1) the original CBS; and 2) the Revisited CBS.
793  *
794  * When the task is starting a new period, the Original CBS is used. In this
795  * case, the runtime is replenished and a new absolute deadline is set.
796  *
797  * When a task is queued before the beginning of the next period, using the
798  * remaining runtime and deadline could make the entity overflow; see
799  * dl_entity_overflow() to find out more about runtime overflow. When such a
800  * case is detected, the runtime and deadline need to be updated.
801  *
802  * If the task has an implicit deadline, i.e., deadline == period, the Original
803  * CBS is applied. The runtime is replenished and a new absolute deadline is
804  * set, as in the previous cases.
805  *
806  * However, the Original CBS does not work properly for tasks with
807  * deadline < period, which are said to have a constrained deadline. By
808  * applying the Original CBS, a constrained deadline task would be able to run
809  * runtime/deadline in a period. With deadline < period, the task would
810  * overrun the runtime/period allowed bandwidth, breaking the admission test.
811  *
812  * In order to prevent this misbehavior, the Revisited CBS is used for
813  * constrained deadline tasks when a runtime overflow is detected. In the
814  * Revisited CBS, rather than replenishing & setting a new absolute deadline,
815  * the remaining runtime of the task is reduced to avoid runtime overflow.
816  * Please refer to the comments of the update_dl_revised_wakeup() function to
817  * find out more about the Revised CBS rule.
818  */
819 static void update_dl_entity(struct sched_dl_entity *dl_se,
820 			     struct sched_dl_entity *pi_se)
821 {
822 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
823 	struct rq *rq = rq_of_dl_rq(dl_rq);
824 
825 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
826 	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
827 
828 		if (unlikely(!dl_is_implicit(dl_se) &&
829 			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
830 			     !dl_se->dl_boosted)){
831 			update_dl_revised_wakeup(dl_se, rq);
832 			return;
833 		}
834 
835 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
836 		dl_se->runtime = pi_se->dl_runtime;
837 	}
838 }
839 
840 static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
841 {
842 	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
843 }
844 
845 /*
846  * If the entity depleted all its runtime, and if we want it to sleep
847  * while waiting for some new execution time to become available, we
848  * set the bandwidth replenishment timer to the replenishment instant
849  * and try to activate it.
850  *
851  * Notice that it is important for the caller to know if the timer
852  * actually started or not (i.e., the replenishment instant is in
853  * the future or in the past).
854  */
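/*
 * Illustrative example (not part of the original source; the numbers are
 * made up): suppose the next replenishment instant is at 500ms in
 * rq_clock terms, while rq_clock currently reads 499ms and the hrtimer
 * base reads 502ms. Then delta = 502ms - 499ms = 3ms and the timer is
 * armed for 503ms in the hrtimer base, which corresponds to the desired
 * 500ms in rq_clock terms.
 */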
855 static int start_dl_timer(struct task_struct *p)
856 {
857 	struct sched_dl_entity *dl_se = &p->dl;
858 	struct hrtimer *timer = &dl_se->dl_timer;
859 	struct rq *rq = task_rq(p);
860 	ktime_t now, act;
861 	s64 delta;
862 
863 	lockdep_assert_held(&rq->lock);
864 
865 	/*
866 	 * We want the timer to fire at the deadline, but considering
867 	 * that it is actually coming from rq->clock and not from
868 	 * hrtimer's time base reading.
869 	 */
870 	act = ns_to_ktime(dl_next_period(dl_se));
871 	now = hrtimer_cb_get_time(timer);
872 	delta = ktime_to_ns(now) - rq_clock(rq);
873 	act = ktime_add_ns(act, delta);
874 
875 	/*
876 	 * If the expiry time already passed, e.g., because the value
877 	 * chosen as the deadline is too small, don't even try to
878 	 * start the timer in the past!
879 	 */
880 	if (ktime_us_delta(act, now) < 0)
881 		return 0;
882 
883 	/*
884 	 * !enqueued will guarantee another callback; even if one is already in
885 	 * progress. This ensures a balanced {get,put}_task_struct().
886 	 *
887 	 * The race against __run_timer() clearing the enqueued state is
888 	 * harmless because we're holding task_rq()->lock, therefore the timer
889 	 * expiring after we've done the check will wait on its task_rq_lock()
890 	 * and observe our state.
891 	 */
892 	if (!hrtimer_is_queued(timer)) {
893 		get_task_struct(p);
894 		hrtimer_start(timer, act, HRTIMER_MODE_ABS);
895 	}
896 
897 	return 1;
898 }
899 
900 /*
901  * This is the bandwidth enforcement timer callback. If here, we know
902  * a task is not on its dl_rq, since the fact that the timer was running
903  * means the task is throttled and needs a runtime replenishment.
904  *
905  * However, what we actually do depends on whether the task is active
906  * (it is on its rq) or has been removed from there by a call to
907  * dequeue_task_dl(). In the former case we must issue the runtime
908  * replenishment and add the task back to the dl_rq; in the latter, we just
909  * do nothing but clearing dl_throttled, so that runtime and deadline
910  * updating (and the queueing back to dl_rq) will be done by the
911  * next call to enqueue_task_dl().
912  */
913 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
914 {
915 	struct sched_dl_entity *dl_se = container_of(timer,
916 						     struct sched_dl_entity,
917 						     dl_timer);
918 	struct task_struct *p = dl_task_of(dl_se);
919 	struct rq_flags rf;
920 	struct rq *rq;
921 
922 	rq = task_rq_lock(p, &rf);
923 
924 	/*
925 	 * The task might have changed its scheduling policy to something
926 	 * different than SCHED_DEADLINE (through switched_from_dl()).
927 	 */
928 	if (!dl_task(p))
929 		goto unlock;
930 
931 	/*
932 	 * The task might have been boosted by someone else and might be in the
933 	 * boosting/deboosting path, in which case it's not throttled.
934 	 */
935 	if (dl_se->dl_boosted)
936 		goto unlock;
937 
938 	/*
939 	 * Spurious timer due to start_dl_timer() race; or we already received
940 	 * a replenishment from rt_mutex_setprio().
941 	 */
942 	if (!dl_se->dl_throttled)
943 		goto unlock;
944 
945 	sched_clock_tick();
946 	update_rq_clock(rq);
947 
948 	/*
949 	 * If the throttle happened during sched-out; like:
950 	 *
951 	 *   schedule()
952 	 *     deactivate_task()
953 	 *       dequeue_task_dl()
954 	 *         update_curr_dl()
955 	 *           start_dl_timer()
956 	 *         __dequeue_task_dl()
957 	 *     prev->on_rq = 0;
958 	 *
959 	 * We can be both throttled and !queued. Replenish the counter
960 	 * but do not enqueue -- wait for our wakeup to do that.
961 	 */
962 	if (!task_on_rq_queued(p)) {
963 		replenish_dl_entity(dl_se, dl_se);
964 		goto unlock;
965 	}
966 
967 #ifdef CONFIG_SMP
968 	if (unlikely(!rq->online)) {
969 		/*
970 		 * If the runqueue is no longer available, migrate the
971 		 * task elsewhere. This necessarily changes rq.
972 		 */
973 		lockdep_unpin_lock(&rq->lock, rf.cookie);
974 		rq = dl_task_offline_migration(rq, p);
975 		rf.cookie = lockdep_pin_lock(&rq->lock);
976 		update_rq_clock(rq);
977 
978 		/*
979 		 * Now that the task has been migrated to the new RQ and we
980 		 * have that locked, proceed as normal and enqueue the task
981 		 * there.
982 		 */
983 	}
984 #endif
985 
986 	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
987 	if (dl_task(rq->curr))
988 		check_preempt_curr_dl(rq, p, 0);
989 	else
990 		resched_curr(rq);
991 
992 #ifdef CONFIG_SMP
993 	/*
994 	 * Queueing this task back might have overloaded rq, check if we need
995 	 * to kick someone away.
996 	 */
997 	if (has_pushable_dl_tasks(rq)) {
998 		/*
999 		 * Nothing relies on rq->lock after this, so it's safe to drop
1000 		 * rq->lock.
1001 		 */
1002 		rq_unpin_lock(rq, &rf);
1003 		push_dl_task(rq);
1004 		rq_repin_lock(rq, &rf);
1005 	}
1006 #endif
1007 
1008 unlock:
1009 	task_rq_unlock(rq, p, &rf);
1010 
1011 	/*
1012 	 * This can free the task_struct, including this hrtimer, do not touch
1013 	 * anything related to that after this.
1014 	 */
1015 	put_task_struct(p);
1016 
1017 	return HRTIMER_NORESTART;
1018 }
1019 
1020 void init_dl_task_timer(struct sched_dl_entity *dl_se)
1021 {
1022 	struct hrtimer *timer = &dl_se->dl_timer;
1023 
1024 	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1025 	timer->function = dl_task_timer;
1026 }
1027 
1028 /*
1029  * During the activation, CBS checks if it can reuse the current task's
1030  * runtime and period. If the deadline of the task is in the past, CBS
1031  * cannot use the runtime, and so it replenishes the task. This rule
1032  * works fine for implicit deadline tasks (deadline == period), and the
1033  * CBS was designed for implicit deadline tasks. However, a task with
1034  * constrained deadline (deadline < period) might be awakened after the
1035  * deadline, but before the next period. In this case, replenishing the
1036  * task would allow it to run for runtime / deadline. As in this case
1037  * deadline < period, CBS enables a task to run for more than the
1038  * runtime / period. In a very loaded system, this can cause a domino
1039  * effect, making other tasks miss their deadlines.
1040  *
1041  * To avoid this problem, in the activation of a constrained deadline
1042  * task after the deadline but before the next period, throttle the
1043  * task and set the replenishing timer to the beginning of the next period,
1044  * unless it is boosted.
1045  */
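/*
 * Illustrative example (not part of the original source; the numbers are
 * made up): a constrained task with dl_deadline = 30ms and
 * dl_period = 100ms, whose current period started at time T, is woken
 * at T + 50ms: that is after its deadline (T + 30ms) but before the
 * next period (T + 100ms). The check below then throttles it, zeroes
 * any leftover runtime and, via start_dl_timer(), defers the
 * replenishment to T + 100ms.
 */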
1046 static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1047 {
1048 	struct task_struct *p = dl_task_of(dl_se);
1049 	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1050 
1051 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1052 	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1053 		if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
1054 			return;
1055 		dl_se->dl_throttled = 1;
1056 		if (dl_se->runtime > 0)
1057 			dl_se->runtime = 0;
1058 	}
1059 }
1060 
1061 static
1062 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1063 {
1064 	return (dl_se->runtime <= 0);
1065 }
1066 
1067 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
1068 
1069 /*
1070  * This function implements the GRUB accounting rule:
1071  * according to the GRUB reclaiming algorithm, the runtime is
1072  * not decreased as "dq = -dt", but as
1073  * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
1074  * where u is the utilization of the task, Umax is the maximum reclaimable
1075  * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1076  * as the difference between the "total runqueue utilization" and the
1077  * runqueue active utilization, and Uextra is the (per runqueue) extra
1078  * reclaimable utilization.
1079  * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
1080  * multiplied by 2^BW_SHIFT, the result has to be shifted right by
1081  * BW_SHIFT.
1082  * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
1083  * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1084  * Since delta is a 64 bit variable, to have an overflow its value
1085  * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
1086  * So, overflow is not an issue here.
1087  */
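/*
 * Illustrative example (not part of the original source; the numbers are
 * made up): for a task with utilization u = 0.25 on a runqueue with
 * Umax = 0.95 (so u / Umax is roughly 0.26), Uinact = 0.2 and
 * Uextra = 0.3, the max above evaluates to 1 - 0.2 - 0.3 = 0.5, so 10ms
 * of execution consume only 5ms of the task's budget: the runqueue's
 * idle bandwidth is reclaimed and the task may run longer than its
 * nominal runtime.
 */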
1088 static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1089 {
1090 	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1091 	u64 u_act;
1092 	u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
1093 
1094 	/*
1095 	 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
1096 	 * we compare u_inact + rq->dl.extra_bw with
1097 	 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
1098 	 * u_inact + rq->dl.extra_bw can be larger than
1099 	 * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative
1100 	 * leading to wrong results)
1101 	 */
1102 	if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1103 		u_act = u_act_min;
1104 	else
1105 		u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
1106 
1107 	return (delta * u_act) >> BW_SHIFT;
1108 }
1109 
1110 /*
1111  * Update the current task's runtime statistics (provided it is still
1112  * a -deadline task and has not been removed from the dl_rq).
1113  */
1114 static void update_curr_dl(struct rq *rq)
1115 {
1116 	struct task_struct *curr = rq->curr;
1117 	struct sched_dl_entity *dl_se = &curr->dl;
1118 	u64 delta_exec;
1119 
1120 	if (!dl_task(curr) || !on_dl_rq(dl_se))
1121 		return;
1122 
1123 	/*
1124 	 * Consumed budget is computed considering the time as
1125 	 * observed by schedulable tasks (excluding time spent
1126 	 * in hardirq context, etc.). Deadlines are instead
1127 	 * computed using hard walltime. This seems to be the more
1128 	 * natural solution, but the full ramifications of this
1129 	 * approach need further study.
1130 	 */
1131 	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
1132 	if (unlikely((s64)delta_exec <= 0)) {
1133 		if (unlikely(dl_se->dl_yielded))
1134 			goto throttle;
1135 		return;
1136 	}
1137 
1138 	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
1139 	cpufreq_update_util(rq, SCHED_CPUFREQ_DL);
1140 
1141 	schedstat_set(curr->se.statistics.exec_max,
1142 		      max(curr->se.statistics.exec_max, delta_exec));
1143 
1144 	curr->se.sum_exec_runtime += delta_exec;
1145 	account_group_exec_runtime(curr, delta_exec);
1146 
1147 	curr->se.exec_start = rq_clock_task(rq);
1148 	cpuacct_charge(curr, delta_exec);
1149 
1150 	sched_rt_avg_update(rq, delta_exec);
1151 
1152 	if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM))
1153 		delta_exec = grub_reclaim(delta_exec, rq, &curr->dl);
1154 	dl_se->runtime -= delta_exec;
1155 
1156 throttle:
1157 	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1158 		dl_se->dl_throttled = 1;
1159 		__dequeue_task_dl(rq, curr, 0);
1160 		if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
1161 			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1162 
1163 		if (!is_leftmost(curr, &rq->dl))
1164 			resched_curr(rq);
1165 	}
1166 
1167 	/*
1168 	 * Because -- for now -- we share the rt bandwidth, we need to
1169 	 * account our runtime there too, otherwise actual rt tasks
1170 	 * would be able to exceed the shared quota.
1171 	 *
1172 	 * Account to the root rt group for now.
1173 	 *
1174 	 * The solution we're working towards is having the RT groups scheduled
1175 	 * using deadline servers -- however there's a few nasties to figure
1176 	 * out before that can happen.
1177 	 */
1178 	if (rt_bandwidth_enabled()) {
1179 		struct rt_rq *rt_rq = &rq->rt;
1180 
1181 		raw_spin_lock(&rt_rq->rt_runtime_lock);
1182 		/*
1183 		 * We'll let actual RT tasks worry about the overflow here, we
1184 		 * have our own CBS to keep us inline; only account when RT
1185 		 * bandwidth is relevant.
1186 		 */
1187 		if (sched_rt_bandwidth_account(rt_rq))
1188 			rt_rq->rt_time += delta_exec;
1189 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
1190 	}
1191 }
1192 
1193 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1194 {
1195 	struct sched_dl_entity *dl_se = container_of(timer,
1196 						     struct sched_dl_entity,
1197 						     inactive_timer);
1198 	struct task_struct *p = dl_task_of(dl_se);
1199 	struct rq_flags rf;
1200 	struct rq *rq;
1201 
1202 	rq = task_rq_lock(p, &rf);
1203 
1204 	if (!dl_task(p) || p->state == TASK_DEAD) {
1205 		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1206 
1207 		if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
1208 			sub_running_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
1209 			sub_rq_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
1210 			dl_se->dl_non_contending = 0;
1211 		}
1212 
1213 		raw_spin_lock(&dl_b->lock);
1214 		__dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1215 		raw_spin_unlock(&dl_b->lock);
1216 		__dl_clear_params(p);
1217 
1218 		goto unlock;
1219 	}
1220 	if (dl_se->dl_non_contending == 0)
1221 		goto unlock;
1222 
1223 	sched_clock_tick();
1224 	update_rq_clock(rq);
1225 
1226 	sub_running_bw(dl_se->dl_bw, &rq->dl);
1227 	dl_se->dl_non_contending = 0;
1228 unlock:
1229 	task_rq_unlock(rq, p, &rf);
1230 	put_task_struct(p);
1231 
1232 	return HRTIMER_NORESTART;
1233 }
1234 
1235 void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1236 {
1237 	struct hrtimer *timer = &dl_se->inactive_timer;
1238 
1239 	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1240 	timer->function = inactive_task_timer;
1241 }
1242 
1243 #ifdef CONFIG_SMP
1244 
1245 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1246 {
1247 	struct rq *rq = rq_of_dl_rq(dl_rq);
1248 
1249 	if (dl_rq->earliest_dl.curr == 0 ||
1250 	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1251 		dl_rq->earliest_dl.curr = deadline;
1252 		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1253 	}
1254 }
1255 
1256 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1257 {
1258 	struct rq *rq = rq_of_dl_rq(dl_rq);
1259 
1260 	/*
1261 	 * Since we may have removed our earliest (and/or next earliest)
1262 	 * task we must recompute them.
1263 	 */
1264 	if (!dl_rq->dl_nr_running) {
1265 		dl_rq->earliest_dl.curr = 0;
1266 		dl_rq->earliest_dl.next = 0;
1267 		cpudl_clear(&rq->rd->cpudl, rq->cpu);
1268 	} else {
1269 		struct rb_node *leftmost = dl_rq->root.rb_leftmost;
1270 		struct sched_dl_entity *entry;
1271 
1272 		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1273 		dl_rq->earliest_dl.curr = entry->deadline;
1274 		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1275 	}
1276 }
1277 
1278 #else
1279 
1280 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1281 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1282 
1283 #endif /* CONFIG_SMP */
1284 
1285 static inline
1286 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1287 {
1288 	int prio = dl_task_of(dl_se)->prio;
1289 	u64 deadline = dl_se->deadline;
1290 
1291 	WARN_ON(!dl_prio(prio));
1292 	dl_rq->dl_nr_running++;
1293 	add_nr_running(rq_of_dl_rq(dl_rq), 1);
1294 	walt_inc_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
1295 
1296 	inc_dl_deadline(dl_rq, deadline);
1297 	inc_dl_migration(dl_se, dl_rq);
1298 }
1299 
1300 static inline
1301 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1302 {
1303 	int prio = dl_task_of(dl_se)->prio;
1304 
1305 	WARN_ON(!dl_prio(prio));
1306 	WARN_ON(!dl_rq->dl_nr_running);
1307 	dl_rq->dl_nr_running--;
1308 	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1309 	walt_dec_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
1310 
1311 	dec_dl_deadline(dl_rq, dl_se->deadline);
1312 	dec_dl_migration(dl_se, dl_rq);
1313 }
1314 
1315 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1316 {
1317 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1318 	struct rb_node **link = &dl_rq->root.rb_root.rb_node;
1319 	struct rb_node *parent = NULL;
1320 	struct sched_dl_entity *entry;
1321 	int leftmost = 1;
1322 
1323 	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1324 
1325 	while (*link) {
1326 		parent = *link;
1327 		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
1328 		if (dl_time_before(dl_se->deadline, entry->deadline))
1329 			link = &parent->rb_left;
1330 		else {
1331 			link = &parent->rb_right;
1332 			leftmost = 0;
1333 		}
1334 	}
1335 
1336 	rb_link_node(&dl_se->rb_node, parent, link);
1337 	rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
1338 
1339 	inc_dl_tasks(dl_se, dl_rq);
1340 }
1341 
1342 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1343 {
1344 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1345 
1346 	if (RB_EMPTY_NODE(&dl_se->rb_node))
1347 		return;
1348 
1349 	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1350 	RB_CLEAR_NODE(&dl_se->rb_node);
1351 
1352 	dec_dl_tasks(dl_se, dl_rq);
1353 }
1354 
1355 static void
1356 enqueue_dl_entity(struct sched_dl_entity *dl_se,
1357 		  struct sched_dl_entity *pi_se, int flags)
1358 {
1359 	BUG_ON(on_dl_rq(dl_se));
1360 
1361 	/*
1362 	 * If this is a wakeup or a new instance, the scheduling
1363 	 * parameters of the task might need updating. Otherwise,
1364 	 * we want a replenishment of its runtime.
1365 	 */
1366 	if (flags & ENQUEUE_WAKEUP) {
1367 		task_contending(dl_se, flags);
1368 		update_dl_entity(dl_se, pi_se);
1369 	} else if (flags & ENQUEUE_REPLENISH) {
1370 		replenish_dl_entity(dl_se, pi_se);
1371 	} else if ((flags & ENQUEUE_RESTORE) &&
1372 		  dl_time_before(dl_se->deadline,
1373 				 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1374 		setup_new_dl_entity(dl_se);
1375 	}
1376 
1377 	__enqueue_dl_entity(dl_se);
1378 }
1379 
1380 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1381 {
1382 	__dequeue_dl_entity(dl_se);
1383 }
1384 
1385 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1386 {
1387 	struct task_struct *pi_task = rt_mutex_get_top_task(p);
1388 	struct sched_dl_entity *pi_se = &p->dl;
1389 
1390 	/*
1391 	 * Use the scheduling parameters of the top pi-waiter task if:
1392 	 * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
1393 	 * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
1394 	 *   smaller than our deadline OR we are a !SCHED_DEADLINE task getting
1395 	 *   boosted due to a SCHED_DEADLINE pi-waiter).
1396 	 * Otherwise we keep our runtime and deadline.
1397 	 */
1398 	if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
1399 		pi_se = &pi_task->dl;
1400 	} else if (!dl_prio(p->normal_prio)) {
1401 		/*
1402 		 * Special case in which we have a !SCHED_DEADLINE task
1403 		 * that is going to be deboosted, but exceeds its
1404 		 * runtime while doing so. No point in replenishing
1405 		 * it, as it's going to return back to its original
1406 		 * scheduling class after this.
1407 		 */
1408 		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
1409 		return;
1410 	}
1411 
1412 	/*
1413 	 * Check if a constrained deadline task was activated
1414 	 * after the deadline but before the next period.
1415 	 * If that is the case, the task will be throttled and
1416 	 * the replenishment timer will be set to the next period.
1417 	 */
1418 	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1419 		dl_check_constrained_dl(&p->dl);
1420 
1421 	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
1422 		add_rq_bw(p->dl.dl_bw, &rq->dl);
1423 		add_running_bw(p->dl.dl_bw, &rq->dl);
1424 	}
1425 
1426 	/*
1427 	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
1428 	 * its budget it needs a replenishment and, since it now is on
1429 	 * its rq, the bandwidth timer callback (which clearly has not
1430 	 * run yet) will take care of this.
1431 	 * However, the active utilization does not depend on the fact
1432 	 * that the task is on the runqueue or not (but depends on the
1433 	 * task's state - in GRUB parlance, "inactive" vs "active contending").
1434 	 * In other words, even if a task is throttled its utilization must
1435 	 * be counted in the active utilization; hence, we need to call
1436 	 * add_running_bw().
1437 	 */
1438 	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1439 		if (flags & ENQUEUE_WAKEUP)
1440 			task_contending(&p->dl, flags);
1441 
1442 		return;
1443 	}
1444 
1445 	enqueue_dl_entity(&p->dl, pi_se, flags);
1446 
1447 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1448 		enqueue_pushable_dl_task(rq, p);
1449 }
1450 
1451 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1452 {
1453 	dequeue_dl_entity(&p->dl);
1454 	dequeue_pushable_dl_task(rq, p);
1455 }
1456 
1457 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1458 {
1459 	update_curr_dl(rq);
1460 	__dequeue_task_dl(rq, p, flags);
1461 
1462 	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
1463 		sub_running_bw(p->dl.dl_bw, &rq->dl);
1464 		sub_rq_bw(p->dl.dl_bw, &rq->dl);
1465 	}
1466 
1467 	/*
1468 	 * This check allows us to start the inactive timer (or to immediately
1469 	 * decrease the active utilization, if needed) in two cases:
1470 	 * when the task blocks and when it is terminating
1471 	 * (p->state == TASK_DEAD). We can handle the two cases in the same
1472 	 * way, because from GRUB's point of view the same thing is happening
1473 	 * (the task moves from "active contending" to "active non contending"
1474 	 * or "inactive")
1475 	 */
1476 	if (flags & DEQUEUE_SLEEP)
1477 		task_non_contending(p);
1478 }
1479 
1480 /*
1481  * Yield task semantic for -deadline tasks is:
1482  *
1483  *   get off the CPU until our next instance, with
1484  *   a new runtime. This is of little use now, since we
1485  *   don't have a bandwidth reclaiming mechanism. Anyway,
1486  *   bandwidth reclaiming is planned for the future, and
1487  *   yield_task_dl will indicate that some spare budget
1488  *   is available for other task instances to use.
1489  */
1490 static void yield_task_dl(struct rq *rq)
1491 {
1492 	/*
1493 	 * We make the task go to sleep until its current deadline by
1494 	 * forcing its runtime to zero. This way, update_curr_dl() stops
1495 	 * it and the bandwidth timer will wake it up and will give it
1496 	 * new scheduling parameters (thanks to dl_yielded=1).
1497 	 */
1498 	rq->curr->dl.dl_yielded = 1;
1499 
1500 	update_rq_clock(rq);
1501 	update_curr_dl(rq);
1502 	/*
1503 	 * Tell update_rq_clock() that we've just updated,
1504 	 * so we don't do microscopic update in schedule()
1505 	 * and double the fastpath cost.
1506 	 */
1507 	rq_clock_skip_update(rq, true);
1508 }
1509 
1510 #ifdef CONFIG_SMP
1511 
1512 static int find_later_rq(struct task_struct *task);
1513 
1514 static int
1515 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags,
1516 		  int sibling_count_hint)
1517 {
1518 	struct task_struct *curr;
1519 	struct rq *rq;
1520 
1521 	if (sd_flag != SD_BALANCE_WAKE)
1522 		goto out;
1523 
1524 	rq = cpu_rq(cpu);
1525 
1526 	rcu_read_lock();
1527 	curr = READ_ONCE(rq->curr); /* unlocked access */
1528 
1529 	/*
1530 	 * If we are dealing with a -deadline task, we must
1531 	 * decide where to wake it up.
1532 	 * If it has a later deadline and the current task
1533 	 * on this rq can't move (provided the waking task
1534 	 * can!) we prefer to send it somewhere else. On the
1535 	 * other hand, if it has a shorter deadline, we
1536 	 * try to make it stay here, it might be important.
1537 	 */
1538 	if (unlikely(dl_task(curr)) &&
1539 	    (curr->nr_cpus_allowed < 2 ||
1540 	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
1541 	    (p->nr_cpus_allowed > 1)) {
1542 		int target = find_later_rq(p);
1543 
1544 		if (target != -1 &&
1545 				(dl_time_before(p->dl.deadline,
1546 					cpu_rq(target)->dl.earliest_dl.curr) ||
1547 				(cpu_rq(target)->dl.dl_nr_running == 0)))
1548 			cpu = target;
1549 	}
1550 	rcu_read_unlock();
1551 
1552 out:
1553 	return cpu;
1554 }
1555 
1556 static void migrate_task_rq_dl(struct task_struct *p)
1557 {
1558 	struct rq *rq;
1559 
1560 	if (p->state != TASK_WAKING)
1561 		return;
1562 
1563 	rq = task_rq(p);
1564 	/*
1565 	 * Since p->state == TASK_WAKING, set_task_cpu() has been called
1566 	 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1567 	 * rq->lock is not; so, take it here.
1568 	 */
1569 	raw_spin_lock(&rq->lock);
1570 	if (p->dl.dl_non_contending) {
1571 		sub_running_bw(p->dl.dl_bw, &rq->dl);
1572 		p->dl.dl_non_contending = 0;
1573 		/*
1574 		 * If the timer handler is currently running and the
1575 		 * timer cannot be cancelled, inactive_task_timer()
1576 		 * will see that dl_non_contending is not set, and
1577 		 * will not touch the rq's active utilization,
1578 		 * so we are still safe.
1579 		 */
1580 		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1581 			put_task_struct(p);
1582 	}
1583 	sub_rq_bw(p->dl.dl_bw, &rq->dl);
1584 	raw_spin_unlock(&rq->lock);
1585 }
1586 
check_preempt_equal_dl(struct rq * rq,struct task_struct * p)1587 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1588 {
1589 	/*
1590 	 * Current can't be migrated, useless to reschedule,
1591 	 * let's hope p can move out.
1592 	 */
1593 	if (rq->curr->nr_cpus_allowed == 1 ||
1594 	    !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1595 		return;
1596 
1597 	/*
1598 	 * p is migratable, so let's not schedule it and
1599 	 * see if it is pushed or pulled somewhere else.
1600 	 */
1601 	if (p->nr_cpus_allowed != 1 &&
1602 	    cpudl_find(&rq->rd->cpudl, p, NULL))
1603 		return;
1604 
1605 	resched_curr(rq);
1606 }
1607 
1608 #endif /* CONFIG_SMP */
1609 
1610 /*
1611  * Only called when both the current and waking task are -deadline
1612  * tasks.
1613  */
check_preempt_curr_dl(struct rq * rq,struct task_struct * p,int flags)1614 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1615 				  int flags)
1616 {
1617 	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1618 		resched_curr(rq);
1619 		return;
1620 	}
1621 
1622 #ifdef CONFIG_SMP
1623 	/*
1624 	 * In the unlikely case current and p have the same deadline
1625 	 * let us try to decide what's the best thing to do...
1626 	 */
1627 	if ((p->dl.deadline == rq->curr->dl.deadline) &&
1628 	    !test_tsk_need_resched(rq->curr))
1629 		check_preempt_equal_dl(rq, p);
1630 #endif /* CONFIG_SMP */
1631 }
1632 
1633 #ifdef CONFIG_SCHED_HRTICK
start_hrtick_dl(struct rq * rq,struct task_struct * p)1634 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1635 {
1636 	hrtick_start(rq, p->dl.runtime);
1637 }
1638 #else /* !CONFIG_SCHED_HRTICK */
start_hrtick_dl(struct rq * rq,struct task_struct * p)1639 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1640 {
1641 }
1642 #endif
1643 
pick_next_dl_entity(struct rq * rq,struct dl_rq * dl_rq)1644 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1645 						   struct dl_rq *dl_rq)
1646 {
1647 	struct rb_node *left = rb_first_cached(&dl_rq->root);
1648 
1649 	if (!left)
1650 		return NULL;
1651 
1652 	return rb_entry(left, struct sched_dl_entity, rb_node);
1653 }
1654 
1655 static struct task_struct *
pick_next_task_dl(struct rq * rq,struct task_struct * prev,struct rq_flags * rf)1656 pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1657 {
1658 	struct sched_dl_entity *dl_se;
1659 	struct task_struct *p;
1660 	struct dl_rq *dl_rq;
1661 
1662 	dl_rq = &rq->dl;
1663 
1664 	if (need_pull_dl_task(rq, prev)) {
1665 		/*
1666 		 * This is OK, because current is on_cpu, which avoids it being
1667 		 * picked for load-balance and preemption/IRQs are still
1668 		 * disabled avoiding further scheduler activity on it and we're
1669 		 * being very careful to re-start the picking loop.
1670 		 */
1671 		rq_unpin_lock(rq, rf);
1672 		pull_dl_task(rq);
1673 		rq_repin_lock(rq, rf);
1674 		/*
1675 		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1676 		 * means a stop task can slip in, in which case we need to
1677 		 * re-start task selection.
1678 		 */
1679 		if (rq->stop && task_on_rq_queued(rq->stop))
1680 			return RETRY_TASK;
1681 	}
1682 
1683 	/*
1684 	 * When prev is DL, we may throttle it in put_prev_task().
1685 	 * So, we update time before we check for dl_nr_running.
1686 	 */
1687 	if (prev->sched_class == &dl_sched_class)
1688 		update_curr_dl(rq);
1689 
1690 	if (unlikely(!dl_rq->dl_nr_running))
1691 		return NULL;
1692 
1693 	put_prev_task(rq, prev);
1694 
1695 	dl_se = pick_next_dl_entity(rq, dl_rq);
1696 	BUG_ON(!dl_se);
1697 
1698 	p = dl_task_of(dl_se);
1699 	p->se.exec_start = rq_clock_task(rq);
1700 
1701 	/* Running task will never be pushed. */
1702 	dequeue_pushable_dl_task(rq, p);
1703 
1704 	if (hrtick_enabled(rq))
1705 		start_hrtick_dl(rq, p);
1706 
1707 	queue_push_tasks(rq);
1708 
1709 	return p;
1710 }
1711 
put_prev_task_dl(struct rq * rq,struct task_struct * p)1712 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1713 {
1714 	update_curr_dl(rq);
1715 
1716 	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1717 		enqueue_pushable_dl_task(rq, p);
1718 }
1719 
task_tick_dl(struct rq * rq,struct task_struct * p,int queued)1720 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1721 {
1722 	update_curr_dl(rq);
1723 
1724 	/*
1725 	 * Even when we have runtime, update_curr_dl() might have resulted in us
1726 	 * not being the leftmost task anymore. In that case NEED_RESCHED will
1727 	 * be set and schedule() will start a new hrtick for the next task.
1728 	 */
1729 	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1730 	    is_leftmost(p, &rq->dl))
1731 		start_hrtick_dl(rq, p);
1732 }
1733 
task_fork_dl(struct task_struct * p)1734 static void task_fork_dl(struct task_struct *p)
1735 {
1736 	/*
1737 	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1738 	 * sched_fork()
1739 	 */
1740 }
1741 
set_curr_task_dl(struct rq * rq)1742 static void set_curr_task_dl(struct rq *rq)
1743 {
1744 	struct task_struct *p = rq->curr;
1745 
1746 	p->se.exec_start = rq_clock_task(rq);
1747 
1748 	/* You can't push away the running task */
1749 	dequeue_pushable_dl_task(rq, p);
1750 }
1751 
1752 #ifdef CONFIG_SMP
1753 
1754 /* Only try algorithms three times */
1755 #define DL_MAX_TRIES 3
1756 
pick_dl_task(struct rq * rq,struct task_struct * p,int cpu)1757 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1758 {
1759 	if (!task_running(rq, p) &&
1760 	    cpumask_test_cpu(cpu, &p->cpus_allowed))
1761 		return 1;
1762 	return 0;
1763 }
1764 
1765 /*
1766  * Return the earliest-deadline pushable task on this rq that can run on
1767  * the given CPU, or NULL if there is none:
1768  */
pick_earliest_pushable_dl_task(struct rq * rq,int cpu)1769 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1770 {
1771 	struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
1772 	struct task_struct *p = NULL;
1773 
1774 	if (!has_pushable_dl_tasks(rq))
1775 		return NULL;
1776 
1777 next_node:
1778 	if (next_node) {
1779 		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1780 
1781 		if (pick_dl_task(rq, p, cpu))
1782 			return p;
1783 
1784 		next_node = rb_next(next_node);
1785 		goto next_node;
1786 	}
1787 
1788 	return NULL;
1789 }
1790 
1791 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1792 
find_later_rq(struct task_struct * task)1793 static int find_later_rq(struct task_struct *task)
1794 {
1795 	struct sched_domain *sd;
1796 	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1797 	int this_cpu = smp_processor_id();
1798 	int cpu = task_cpu(task);
1799 
1800 	/* Make sure the mask is initialized first */
1801 	if (unlikely(!later_mask))
1802 		return -1;
1803 
1804 	if (task->nr_cpus_allowed == 1)
1805 		return -1;
1806 
1807 	/*
1808 	 * We have to consider system topology and task affinity
1809 	 * first, then we can look for a suitable cpu.
1810 	 */
1811 	if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1812 		return -1;
1813 
1814 	/*
1815 	 * If we are here, some targets have been found: among the
1816 	 * runqueues whose current tasks have later deadlines than
1817 	 * ours, the most suitable is the rq with the latest such
1818 	 * deadline.
1819 	 *
1820 	 * Now we check how well this matches the task's
1821 	 * affinity and the system topology.
1822 	 *
1823 	 * The last cpu where the task ran is our first
1824 	 * guess, since it is most likely cache-hot there.
1825 	 */
1826 	if (cpumask_test_cpu(cpu, later_mask))
1827 		return cpu;
1828 	/*
1829 	 * Check if this_cpu is to be skipped (i.e., it is
1830 	 * not in the mask) or not.
1831 	 */
1832 	if (!cpumask_test_cpu(this_cpu, later_mask))
1833 		this_cpu = -1;
1834 
1835 	rcu_read_lock();
1836 	for_each_domain(cpu, sd) {
1837 		if (sd->flags & SD_WAKE_AFFINE) {
1838 			int best_cpu;
1839 
1840 			/*
1841 			 * If possible, preempting this_cpu is
1842 			 * cheaper than migrating.
1843 			 */
1844 			if (this_cpu != -1 &&
1845 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1846 				rcu_read_unlock();
1847 				return this_cpu;
1848 			}
1849 
1850 			best_cpu = cpumask_first_and(later_mask,
1851 							sched_domain_span(sd));
1852 			/*
1853 			 * Last chance: if a cpu that is in both later_mask
1854 			 * and the current sd span is valid, that becomes our
1855 			 * choice. Of course, the latest possible cpu is
1856 			 * already under consideration through later_mask.
1857 			 */
1858 			if (best_cpu < nr_cpu_ids) {
1859 				rcu_read_unlock();
1860 				return best_cpu;
1861 			}
1862 		}
1863 	}
1864 	rcu_read_unlock();
1865 
1866 	/*
1867 	 * At this point, all our guesses failed; we just return
1868 	 * 'something' and let the caller sort things out.
1869 	 */
1870 	if (this_cpu != -1)
1871 		return this_cpu;
1872 
1873 	cpu = cpumask_any(later_mask);
1874 	if (cpu < nr_cpu_ids)
1875 		return cpu;
1876 
1877 	return -1;
1878 }
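
/*
 * In short, the heuristic above prefers, in order: (1) the CPU the task
 * last ran on, if it is in later_mask; (2) this_cpu, when it is in
 * later_mask and shares a SD_WAKE_AFFINE domain with the task's CPU;
 * (3) the first later_mask CPU inside such a domain; and finally
 * (4) this_cpu or any remaining CPU in later_mask, or -1 if nothing
 * suitable was found.
 */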
1879 
1880 /* Locks the rq it finds */
find_lock_later_rq(struct task_struct * task,struct rq * rq)1881 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1882 {
1883 	struct rq *later_rq = NULL;
1884 	int tries;
1885 	int cpu;
1886 
1887 	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1888 		cpu = find_later_rq(task);
1889 
1890 		if ((cpu == -1) || (cpu == rq->cpu))
1891 			break;
1892 
1893 		later_rq = cpu_rq(cpu);
1894 
1895 		if (later_rq->dl.dl_nr_running &&
1896 		    !dl_time_before(task->dl.deadline,
1897 					later_rq->dl.earliest_dl.curr)) {
1898 			/*
1899 			 * Target rq has tasks of equal or earlier deadline;
1900 			 * retrying does not release any lock and is unlikely
1901 			 * to yield a different result.
1902 			 */
1903 			later_rq = NULL;
1904 			break;
1905 		}
1906 
1907 		/* Retry if something changed. */
1908 		if (double_lock_balance(rq, later_rq)) {
1909 			if (unlikely(task_rq(task) != rq ||
1910 				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
1911 				     task_running(rq, task) ||
1912 				     !dl_task(task) ||
1913 				     !task_on_rq_queued(task))) {
1914 				double_unlock_balance(rq, later_rq);
1915 				later_rq = NULL;
1916 				break;
1917 			}
1918 		}
1919 
1920 		/*
1921 		 * If the rq we found has no -deadline task, or
1922 		 * its earliest one has a later deadline than our
1923 		 * task, the rq is a good one.
1924 		 */
1925 		if (!later_rq->dl.dl_nr_running ||
1926 		    dl_time_before(task->dl.deadline,
1927 				   later_rq->dl.earliest_dl.curr))
1928 			break;
1929 
1930 		/* Otherwise we try again. */
1931 		double_unlock_balance(rq, later_rq);
1932 		later_rq = NULL;
1933 	}
1934 
1935 	return later_rq;
1936 }
1937 
pick_next_pushable_dl_task(struct rq * rq)1938 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1939 {
1940 	struct task_struct *p;
1941 
1942 	if (!has_pushable_dl_tasks(rq))
1943 		return NULL;
1944 
1945 	p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
1946 		     struct task_struct, pushable_dl_tasks);
1947 
1948 	BUG_ON(rq->cpu != task_cpu(p));
1949 	BUG_ON(task_current(rq, p));
1950 	BUG_ON(p->nr_cpus_allowed <= 1);
1951 
1952 	BUG_ON(!task_on_rq_queued(p));
1953 	BUG_ON(!dl_task(p));
1954 
1955 	return p;
1956 }
1957 
1958 /*
1959  * See if the non running -deadline tasks on this rq
1960  * can be sent to some other CPU where they can preempt
1961  * and start executing.
1962  */
push_dl_task(struct rq * rq)1963 static int push_dl_task(struct rq *rq)
1964 {
1965 	struct task_struct *next_task;
1966 	struct rq *later_rq;
1967 	int ret = 0;
1968 
1969 	if (!rq->dl.overloaded)
1970 		return 0;
1971 
1972 	next_task = pick_next_pushable_dl_task(rq);
1973 	if (!next_task)
1974 		return 0;
1975 
1976 retry:
1977 	if (unlikely(next_task == rq->curr)) {
1978 		WARN_ON(1);
1979 		return 0;
1980 	}
1981 
1982 	/*
1983 	 * If next_task preempts rq->curr, and rq->curr
1984 	 * can move away, it makes sense to just reschedule
1985 	 * without going further in pushing next_task.
1986 	 */
1987 	if (dl_task(rq->curr) &&
1988 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1989 	    rq->curr->nr_cpus_allowed > 1) {
1990 		resched_curr(rq);
1991 		return 0;
1992 	}
1993 
1994 	/* We might release rq lock */
1995 	get_task_struct(next_task);
1996 
1997 	/* Will lock the rq it'll find */
1998 	later_rq = find_lock_later_rq(next_task, rq);
1999 	if (!later_rq) {
2000 		struct task_struct *task;
2001 
2002 		/*
2003 		 * We must check all this again, since
2004 		 * find_lock_later_rq releases rq->lock and it is
2005 		 * then possible that next_task has migrated.
2006 		 */
2007 		task = pick_next_pushable_dl_task(rq);
2008 		if (task == next_task) {
2009 			/*
2010 			 * The task is still there. We don't try
2011 			 * again, some other cpu will pull it when ready.
2012 			 */
2013 			goto out;
2014 		}
2015 
2016 		if (!task)
2017 			/* No more tasks */
2018 			goto out;
2019 
2020 		put_task_struct(next_task);
2021 		next_task = task;
2022 		goto retry;
2023 	}
2024 
2025 	deactivate_task(rq, next_task, 0);
2026 	sub_running_bw(next_task->dl.dl_bw, &rq->dl);
2027 	sub_rq_bw(next_task->dl.dl_bw, &rq->dl);
2028 	next_task->on_rq = TASK_ON_RQ_MIGRATING;
2029 	set_task_cpu(next_task, later_rq->cpu);
2030 	next_task->on_rq = TASK_ON_RQ_QUEUED;
2031 	add_rq_bw(next_task->dl.dl_bw, &later_rq->dl);
2032 	add_running_bw(next_task->dl.dl_bw, &later_rq->dl);
2033 	activate_task(later_rq, next_task, 0);
2034 	ret = 1;
2035 
2036 	resched_curr(later_rq);
2037 
2038 	double_unlock_balance(rq, later_rq);
2039 
2040 out:
2041 	put_task_struct(next_task);
2042 
2043 	return ret;
2044 }
2045 
push_dl_tasks(struct rq * rq)2046 static void push_dl_tasks(struct rq *rq)
2047 {
2048 	/* push_dl_task() will return true if it moved a -deadline task */
2049 	while (push_dl_task(rq))
2050 		;
2051 }
2052 
pull_dl_task(struct rq * this_rq)2053 static void pull_dl_task(struct rq *this_rq)
2054 {
2055 	int this_cpu = this_rq->cpu, cpu;
2056 	struct task_struct *p;
2057 	bool resched = false;
2058 	struct rq *src_rq;
2059 	u64 dmin = LONG_MAX;
2060 
2061 	if (likely(!dl_overloaded(this_rq)))
2062 		return;
2063 
2064 	/*
2065 	 * Match the barrier from dl_set_overload(); this guarantees that if we
2066 	 * see overloaded we must also see the dlo_mask bit.
2067 	 */
2068 	smp_rmb();
2069 
2070 	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2071 		if (this_cpu == cpu)
2072 			continue;
2073 
2074 		src_rq = cpu_rq(cpu);
2075 
2076 		/*
2077 		 * It looks racy, and it is! However, as in sched_rt.c,
2078 		 * we are fine with this.
2079 		 */
2080 		if (this_rq->dl.dl_nr_running &&
2081 		    dl_time_before(this_rq->dl.earliest_dl.curr,
2082 				   src_rq->dl.earliest_dl.next))
2083 			continue;
2084 
2085 		/* Might drop this_rq->lock */
2086 		double_lock_balance(this_rq, src_rq);
2087 
2088 		/*
2089 		 * If there are no more pullable tasks on the
2090 		 * rq, we're done with it.
2091 		 */
2092 		if (src_rq->dl.dl_nr_running <= 1)
2093 			goto skip;
2094 
2095 		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2096 
2097 		/*
2098 		 * We found a task to be pulled if:
2099 		 *  - it preempts our current task (if there is one),
2100 		 *  - it will preempt the last one we pulled (if any).
2101 		 */
2102 		if (p && dl_time_before(p->dl.deadline, dmin) &&
2103 		    (!this_rq->dl.dl_nr_running ||
2104 		     dl_time_before(p->dl.deadline,
2105 				    this_rq->dl.earliest_dl.curr))) {
2106 			WARN_ON(p == src_rq->curr);
2107 			WARN_ON(!task_on_rq_queued(p));
2108 
2109 			/*
2110 			 * Then we pull only if p does not have an earlier
2111 			 * deadline than the current task of its runqueue.
2112 			 */
2113 			if (dl_time_before(p->dl.deadline,
2114 					   src_rq->curr->dl.deadline))
2115 				goto skip;
2116 
2117 			resched = true;
2118 
2119 			deactivate_task(src_rq, p, 0);
2120 			sub_running_bw(p->dl.dl_bw, &src_rq->dl);
2121 			sub_rq_bw(p->dl.dl_bw, &src_rq->dl);
2122 			p->on_rq = TASK_ON_RQ_MIGRATING;
2123 			set_task_cpu(p, this_cpu);
2124 			p->on_rq = TASK_ON_RQ_QUEUED;
2125 			add_rq_bw(p->dl.dl_bw, &this_rq->dl);
2126 			add_running_bw(p->dl.dl_bw, &this_rq->dl);
2127 			activate_task(this_rq, p, 0);
2128 			dmin = p->dl.deadline;
2129 
2130 			/* Is there any other task even earlier? */
2131 		}
2132 skip:
2133 		double_unlock_balance(this_rq, src_rq);
2134 	}
2135 
2136 	if (resched)
2137 		resched_curr(this_rq);
2138 }
2139 
2140 /*
2141  * Since the task is not running and a reschedule is not going to happen
2142  * anytime soon on its runqueue, we try pushing it away now.
2143  */
task_woken_dl(struct rq * rq,struct task_struct * p)2144 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2145 {
2146 	if (!task_running(rq, p) &&
2147 	    !test_tsk_need_resched(rq->curr) &&
2148 	    p->nr_cpus_allowed > 1 &&
2149 	    dl_task(rq->curr) &&
2150 	    (rq->curr->nr_cpus_allowed < 2 ||
2151 	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2152 		push_dl_tasks(rq);
2153 	}
2154 }
2155 
set_cpus_allowed_dl(struct task_struct * p,const struct cpumask * new_mask)2156 static void set_cpus_allowed_dl(struct task_struct *p,
2157 				const struct cpumask *new_mask)
2158 {
2159 	struct root_domain *src_rd;
2160 	struct rq *rq;
2161 
2162 	BUG_ON(!dl_task(p));
2163 
2164 	rq = task_rq(p);
2165 	src_rd = rq->rd;
2166 	/*
2167 	 * Migrating a SCHED_DEADLINE task between exclusive
2168 	 * cpusets (different root_domains) entails a bandwidth
2169 	 * update. We already made space for us in the destination
2170 	 * domain (see cpuset_can_attach()).
2171 	 */
2172 	if (!cpumask_intersects(src_rd->span, new_mask)) {
2173 		struct dl_bw *src_dl_b;
2174 
2175 		src_dl_b = dl_bw_of(cpu_of(rq));
2176 		/*
2177 		 * We now free resources of the root_domain we are migrating
2178 		 * off. In the worst case, sched_setattr() may temporarily fail
2179 		 * until we complete the update.
2180 		 */
2181 		raw_spin_lock(&src_dl_b->lock);
2182 		__dl_clear(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2183 		raw_spin_unlock(&src_dl_b->lock);
2184 	}
2185 
2186 	set_cpus_allowed_common(p, new_mask);
2187 }
2188 
2189 /* Assumes rq->lock is held */
rq_online_dl(struct rq * rq)2190 static void rq_online_dl(struct rq *rq)
2191 {
2192 	if (rq->dl.overloaded)
2193 		dl_set_overload(rq);
2194 
2195 	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2196 	if (rq->dl.dl_nr_running > 0)
2197 		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2198 }
2199 
2200 /* Assumes rq->lock is held */
rq_offline_dl(struct rq * rq)2201 static void rq_offline_dl(struct rq *rq)
2202 {
2203 	if (rq->dl.overloaded)
2204 		dl_clear_overload(rq);
2205 
2206 	cpudl_clear(&rq->rd->cpudl, rq->cpu);
2207 	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2208 }
2209 
init_sched_dl_class(void)2210 void __init init_sched_dl_class(void)
2211 {
2212 	unsigned int i;
2213 
2214 	for_each_possible_cpu(i)
2215 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2216 					GFP_KERNEL, cpu_to_node(i));
2217 }
2218 
2219 #endif /* CONFIG_SMP */
2220 
switched_from_dl(struct rq * rq,struct task_struct * p)2221 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2222 {
2223 	/*
2224 	 * task_non_contending() can start the "inactive timer" (if the 0-lag
2225 	 * time is in the future). If the task switches back to dl before
2226 	 * the "inactive timer" fires, it can continue to consume its current
2227 	 * runtime using its current deadline. If it stays outside of
2228 	 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2229 	 * will reset the task parameters.
2230 	 */
2231 	if (task_on_rq_queued(p) && p->dl.dl_runtime)
2232 		task_non_contending(p);
2233 
2234 	if (!task_on_rq_queued(p))
2235 		sub_rq_bw(p->dl.dl_bw, &rq->dl);
2236 
2237 	/*
2238 	 * We cannot use inactive_task_timer() to invoke sub_running_bw()
2239 	 * at the 0-lag time, because the task could have been migrated
2240 	 * in the meantime, while it was SCHED_OTHER.
2241 	 */
2242 	if (p->dl.dl_non_contending)
2243 		p->dl.dl_non_contending = 0;
2244 
2245 	/*
2246 	 * Since this might be the only -deadline task on the rq,
2247 	 * this is the right place to try to pull some other one
2248 	 * from an overloaded cpu, if any.
2249 	 */
2250 	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2251 		return;
2252 
2253 	queue_pull_task(rq);
2254 }
2255 
2256 /*
2257  * When switching to -deadline, we may overload the rq, then
2258  * we try to push someone off, if possible.
2259  */
switched_to_dl(struct rq * rq,struct task_struct * p)2260 static void switched_to_dl(struct rq *rq, struct task_struct *p)
2261 {
2262 	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2263 		put_task_struct(p);
2264 
2265 	/* If p is not queued we will update its parameters at next wakeup. */
2266 	if (!task_on_rq_queued(p)) {
2267 		add_rq_bw(p->dl.dl_bw, &rq->dl);
2268 
2269 		return;
2270 	}
2271 
2272 	if (rq->curr != p) {
2273 #ifdef CONFIG_SMP
2274 		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2275 			queue_push_tasks(rq);
2276 #endif
2277 		if (dl_task(rq->curr))
2278 			check_preempt_curr_dl(rq, p, 0);
2279 		else
2280 			resched_curr(rq);
2281 	}
2282 }
2283 
2284 /*
2285  * If the scheduling parameters of a -deadline task changed,
2286  * a push or pull operation might be needed.
2287  */
prio_changed_dl(struct rq * rq,struct task_struct * p,int oldprio)2288 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2289 			    int oldprio)
2290 {
2291 	if (task_on_rq_queued(p) || rq->curr == p) {
2292 #ifdef CONFIG_SMP
2293 		/*
2294 		 * This might be too much, but unfortunately
2295 		 * we don't have the old deadline value, and
2296 		 * we can't tell whether the task is raising
2297 		 * or lowering its prio, so...
2298 		 */
2299 		if (!rq->dl.overloaded)
2300 			queue_pull_task(rq);
2301 
2302 		/*
2303 		 * If we now have an earlier deadline task than p,
2304 		 * then reschedule, provided p is still on this
2305 		 * runqueue.
2306 		 */
2307 		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2308 			resched_curr(rq);
2309 #else
2310 		/*
2311 		 * Again, we don't know if p has an earlier
2312 		 * or later deadline, so let's blindly set a
2313 		 * (maybe not needed) rescheduling point.
2314 		 */
2315 		resched_curr(rq);
2316 #endif /* CONFIG_SMP */
2317 	}
2318 }
2319 
2320 const struct sched_class dl_sched_class = {
2321 	.next			= &rt_sched_class,
2322 	.enqueue_task		= enqueue_task_dl,
2323 	.dequeue_task		= dequeue_task_dl,
2324 	.yield_task		= yield_task_dl,
2325 
2326 	.check_preempt_curr	= check_preempt_curr_dl,
2327 
2328 	.pick_next_task		= pick_next_task_dl,
2329 	.put_prev_task		= put_prev_task_dl,
2330 
2331 #ifdef CONFIG_SMP
2332 	.select_task_rq		= select_task_rq_dl,
2333 	.migrate_task_rq	= migrate_task_rq_dl,
2334 	.set_cpus_allowed       = set_cpus_allowed_dl,
2335 	.rq_online              = rq_online_dl,
2336 	.rq_offline             = rq_offline_dl,
2337 	.task_woken		= task_woken_dl,
2338 #endif
2339 
2340 	.set_curr_task		= set_curr_task_dl,
2341 	.task_tick		= task_tick_dl,
2342 	.task_fork              = task_fork_dl,
2343 
2344 	.prio_changed           = prio_changed_dl,
2345 	.switched_from		= switched_from_dl,
2346 	.switched_to		= switched_to_dl,
2347 
2348 	.update_curr		= update_curr_dl,
2349 };
2350 
sched_dl_global_validate(void)2351 int sched_dl_global_validate(void)
2352 {
2353 	u64 runtime = global_rt_runtime();
2354 	u64 period = global_rt_period();
2355 	u64 new_bw = to_ratio(period, runtime);
2356 	struct dl_bw *dl_b;
2357 	int cpu, ret = 0;
2358 	unsigned long flags;
2359 
2360 	/*
2361 	 * Here we want to check that the bandwidth is not being set to a
2362 	 * value smaller than the currently allocated bandwidth in
2363 	 * any of the root_domains.
2364 	 *
2365 	 * FIXME: Cycling on all the CPUs is overkill, but simpler than
2366 	 * cycling on root_domains... Discussion on different/better
2367 	 * solutions is welcome!
2368 	 */
2369 	for_each_possible_cpu(cpu) {
2370 		rcu_read_lock_sched();
2371 		dl_b = dl_bw_of(cpu);
2372 
2373 		raw_spin_lock_irqsave(&dl_b->lock, flags);
2374 		if (new_bw < dl_b->total_bw)
2375 			ret = -EBUSY;
2376 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2377 
2378 		rcu_read_unlock_sched();
2379 
2380 		if (ret)
2381 			break;
2382 	}
2383 
2384 	return ret;
2385 }
2386 
init_dl_rq_bw_ratio(struct dl_rq * dl_rq)2387 void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2388 {
2389 	if (global_rt_runtime() == RUNTIME_INF) {
2390 		dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2391 		dl_rq->extra_bw = 1 << BW_SHIFT;
2392 	} else {
2393 		dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2394 			  global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2395 		dl_rq->extra_bw = to_ratio(global_rt_period(),
2396 						    global_rt_runtime());
2397 	}
2398 }
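
/*
 * A worked example of the fixed-point values above, assuming the default
 * limits (sched_rt_runtime_us = 950000, sched_rt_period_us = 1000000) and
 * BW_SHIFT == 20, RATIO_SHIFT == 8:
 *
 *	extra_bw = to_ratio(period, runtime)
 *		 = (950000 << 20) / 1000000           ~= 996147
 *	bw_ratio = to_ratio(runtime, period) >> (20 - 8)
 *		 = ((1000000 << 20) / 950000) >> 12   ~= 269
 *
 * i.e. roughly 0.95 and 1/0.95 in their respective fixed-point scales.
 */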
2399 
sched_dl_do_global(void)2400 void sched_dl_do_global(void)
2401 {
2402 	u64 new_bw = -1;
2403 	struct dl_bw *dl_b;
2404 	int cpu;
2405 	unsigned long flags;
2406 
2407 	def_dl_bandwidth.dl_period = global_rt_period();
2408 	def_dl_bandwidth.dl_runtime = global_rt_runtime();
2409 
2410 	if (global_rt_runtime() != RUNTIME_INF)
2411 		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2412 
2413 	/*
2414 	 * FIXME: As above...
2415 	 */
2416 	for_each_possible_cpu(cpu) {
2417 		rcu_read_lock_sched();
2418 		dl_b = dl_bw_of(cpu);
2419 
2420 		raw_spin_lock_irqsave(&dl_b->lock, flags);
2421 		dl_b->bw = new_bw;
2422 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2423 
2424 		rcu_read_unlock_sched();
2425 		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2426 	}
2427 }
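
/*
 * The global limits used above are the ones shared with the -rt class,
 * normally tuned from userspace via /proc/sys/kernel/sched_rt_runtime_us
 * and /proc/sys/kernel/sched_rt_period_us. Writing -1 to the runtime knob
 * selects RUNTIME_INF, in which case new_bw stays -1 and the per-domain
 * cap is effectively disabled.
 */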
2428 
2429 /*
2430  * We must be sure that accepting a new task (or allowing changing the
2431  * parameters of an existing one) is consistent with the bandwidth
2432  * constraints. If so, this function also updates the currently
2433  * allocated bandwidth accordingly, to reflect the new situation.
2434  *
2435  * This function is called while holding p's rq->lock.
2436  */
sched_dl_overflow(struct task_struct * p,int policy,const struct sched_attr * attr)2437 int sched_dl_overflow(struct task_struct *p, int policy,
2438 		      const struct sched_attr *attr)
2439 {
2440 	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2441 	u64 period = attr->sched_period ?: attr->sched_deadline;
2442 	u64 runtime = attr->sched_runtime;
2443 	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2444 	int cpus, err = -1;
2445 
2446 	/* !deadline task may carry old deadline bandwidth */
2447 	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2448 		return 0;
2449 
2450 	/*
2451 	 * Whether a task enters, leaves, or stays -deadline but changes
2452 	 * its parameters, we may need to update the total allocated
2453 	 * bandwidth of the container accordingly.
2454 	 */
2455 	raw_spin_lock(&dl_b->lock);
2456 	cpus = dl_bw_cpus(task_cpu(p));
2457 	if (dl_policy(policy) && !task_has_dl_policy(p) &&
2458 	    !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2459 		if (hrtimer_active(&p->dl.inactive_timer))
2460 			__dl_clear(dl_b, p->dl.dl_bw, cpus);
2461 		__dl_add(dl_b, new_bw, cpus);
2462 		err = 0;
2463 	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
2464 		   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2465 		/*
2466 		 * XXX this is slightly incorrect: when the task
2467 		 * utilization decreases, we should delay the total
2468 		 * utilization change until the task's 0-lag point.
2469 		 * But this would require to set the task's "inactive
2470 		 * timer" when the task is not inactive.
2471 		 */
2472 		__dl_clear(dl_b, p->dl.dl_bw, cpus);
2473 		__dl_add(dl_b, new_bw, cpus);
2474 		dl_change_utilization(p, new_bw);
2475 		err = 0;
2476 	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2477 		/*
2478 		 * Do not decrease the total deadline utilization here,
2479 		 * switched_from_dl() will take care to do it at the correct
2480 		 * (0-lag) time.
2481 		 */
2482 		err = 0;
2483 	}
2484 	raw_spin_unlock(&dl_b->lock);
2485 
2486 	return err;
2487 }
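
/*
 * An illustrative example of the admission test above (made-up numbers):
 * with the default 95% per-CPU cap and a 4-CPU root domain, up to
 * 0.95 * 4 = 3.8 "CPUs worth" of deadline bandwidth can be admitted. A
 * task asking for sched_runtime = 10ms every sched_period = 100ms
 * contributes to_ratio(100ms, 10ms) ~= 0.1 of a CPU, so roughly 38 such
 * tasks fit before sched_setattr() starts failing with -EBUSY.
 */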
2488 
2489 /*
2490  * This function initializes the sched_dl_entity of a task that is
2491  * becoming SCHED_DEADLINE.
2492  *
2493  * Only the static values are considered here, the actual runtime and the
2494  * absolute deadline will be properly calculated when the task is enqueued
2495  * for the first time with its new policy.
2496  */
__setparam_dl(struct task_struct * p,const struct sched_attr * attr)2497 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2498 {
2499 	struct sched_dl_entity *dl_se = &p->dl;
2500 
2501 	dl_se->dl_runtime = attr->sched_runtime;
2502 	dl_se->dl_deadline = attr->sched_deadline;
2503 	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2504 	dl_se->flags = attr->sched_flags;
2505 	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2506 	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2507 }
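
/*
 * For example (illustrative numbers only), a task with
 * sched_runtime = 10ms, sched_deadline = 30ms, sched_period = 100ms gets,
 * assuming BW_SHIFT == 20:
 *
 *	dl_bw      = to_ratio(100ms, 10ms) ~= 0.10 << 20 ~= 104857
 *	dl_density = to_ratio(30ms, 10ms)  ~= 0.33 << 20 ~= 349525
 *
 * dl_bw is what admission control and the GRUB accounting operate on,
 * while dl_density is used by the revised wakeup rule for constrained
 * (deadline < period) tasks.
 */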
2508 
__getparam_dl(struct task_struct * p,struct sched_attr * attr)2509 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2510 {
2511 	struct sched_dl_entity *dl_se = &p->dl;
2512 
2513 	attr->sched_priority = p->rt_priority;
2514 	attr->sched_runtime = dl_se->dl_runtime;
2515 	attr->sched_deadline = dl_se->dl_deadline;
2516 	attr->sched_period = dl_se->dl_period;
2517 	attr->sched_flags = dl_se->flags;
2518 }
2519 
2520 /*
2521  * This function validates the new parameters of a -deadline task.
2522  * We require the deadline to be non-zero and greater than or equal
2523  * to the runtime, and the period to be either zero or greater than
2524  * or equal to the deadline. Furthermore, we have to be sure that
2525  * user parameters are above the internal resolution of 1us (we
2526  * check sched_runtime only since it is always the smallest one) and
2527  * below 2^63 ns (we have to check both sched_deadline and
2528  * sched_period, as the latter can be zero).
2529  */
__checkparam_dl(const struct sched_attr * attr)2530 bool __checkparam_dl(const struct sched_attr *attr)
2531 {
2532 	/* deadline != 0 */
2533 	if (attr->sched_deadline == 0)
2534 		return false;
2535 
2536 	/*
2537 	 * Since we truncate DL_SCALE bits, make sure we're at least
2538 	 * that big.
2539 	 */
2540 	if (attr->sched_runtime < (1ULL << DL_SCALE))
2541 		return false;
2542 
2543 	/*
2544 	 * Since we use the MSB for wrap-around and sign issues, make
2545 	 * sure it's not set (mind that period can be equal to zero).
2546 	 */
2547 	if (attr->sched_deadline & (1ULL << 63) ||
2548 	    attr->sched_period & (1ULL << 63))
2549 		return false;
2550 
2551 	/* runtime <= deadline <= period (if period != 0) */
2552 	if ((attr->sched_period != 0 &&
2553 	     attr->sched_period < attr->sched_deadline) ||
2554 	    attr->sched_deadline < attr->sched_runtime)
2555 		return false;
2556 
2557 	return true;
2558 }
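
/*
 * A minimal userspace sketch (not kernel code) of parameters that pass
 * the checks above, set via the sched_setattr() syscall:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	//  10ms
 *		.sched_deadline	=  30 * 1000 * 1000,	//  30ms
 *		.sched_period	= 100 * 1000 * 1000,	// 100ms
 *	};
 *
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 *
 * runtime <= deadline <= period and runtime >= ~1us, so __checkparam_dl()
 * accepts it; whether the task is actually admitted then depends on
 * sched_dl_overflow() above.
 */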
2559 
2560 /*
2561  * This function clears the sched_dl_entity static params.
2562  */
__dl_clear_params(struct task_struct * p)2563 void __dl_clear_params(struct task_struct *p)
2564 {
2565 	struct sched_dl_entity *dl_se = &p->dl;
2566 
2567 	dl_se->dl_runtime = 0;
2568 	dl_se->dl_deadline = 0;
2569 	dl_se->dl_period = 0;
2570 	dl_se->flags = 0;
2571 	dl_se->dl_bw = 0;
2572 	dl_se->dl_density = 0;
2573 
2574 	dl_se->dl_throttled = 0;
2575 	dl_se->dl_yielded = 0;
2576 	dl_se->dl_non_contending = 0;
2577 }
2578 
dl_param_changed(struct task_struct * p,const struct sched_attr * attr)2579 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2580 {
2581 	struct sched_dl_entity *dl_se = &p->dl;
2582 
2583 	if (dl_se->dl_runtime != attr->sched_runtime ||
2584 	    dl_se->dl_deadline != attr->sched_deadline ||
2585 	    dl_se->dl_period != attr->sched_period ||
2586 	    dl_se->flags != attr->sched_flags)
2587 		return true;
2588 
2589 	return false;
2590 }
2591 
2592 #ifdef CONFIG_SMP
dl_task_can_attach(struct task_struct * p,const struct cpumask * cs_cpus_allowed)2593 int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
2594 {
2595 	unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
2596 							cs_cpus_allowed);
2597 	struct dl_bw *dl_b;
2598 	bool overflow;
2599 	int cpus, ret;
2600 	unsigned long flags;
2601 
2602 	rcu_read_lock_sched();
2603 	dl_b = dl_bw_of(dest_cpu);
2604 	raw_spin_lock_irqsave(&dl_b->lock, flags);
2605 	cpus = dl_bw_cpus(dest_cpu);
2606 	overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
2607 	if (overflow)
2608 		ret = -EBUSY;
2609 	else {
2610 		/*
2611 		 * We reserve space for this task in the destination
2612 		 * root_domain, as we can't fail after this point.
2613 		 * We will free resources in the source root_domain
2614 		 * later on (see set_cpus_allowed_dl()).
2615 		 */
2616 		__dl_add(dl_b, p->dl.dl_bw, cpus);
2617 		ret = 0;
2618 	}
2619 	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2620 	rcu_read_unlock_sched();
2621 	return ret;
2622 }
2623 
dl_cpuset_cpumask_can_shrink(const struct cpumask * cur,const struct cpumask * trial)2624 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2625 				 const struct cpumask *trial)
2626 {
2627 	int ret = 1, trial_cpus;
2628 	struct dl_bw *cur_dl_b;
2629 	unsigned long flags;
2630 
2631 	rcu_read_lock_sched();
2632 	cur_dl_b = dl_bw_of(cpumask_any(cur));
2633 	trial_cpus = cpumask_weight(trial);
2634 
2635 	raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2636 	if (cur_dl_b->bw != -1 &&
2637 	    cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2638 		ret = 0;
2639 	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
2640 	rcu_read_unlock_sched();
2641 	return ret;
2642 }
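
/*
 * Illustrative example (made-up numbers): if the tasks already admitted
 * in the current cpuset sum up to total_bw ~= 1.2 CPUs of bandwidth and
 * the per-CPU cap is the default 95%, shrinking the cpuset to one CPU is
 * refused (0.95 * 1 < 1.2) while shrinking it to two CPUs is still
 * allowed (0.95 * 2 >= 1.2).
 */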
2643 
dl_cpu_busy(unsigned int cpu)2644 bool dl_cpu_busy(unsigned int cpu)
2645 {
2646 	unsigned long flags;
2647 	struct dl_bw *dl_b;
2648 	bool overflow;
2649 	int cpus;
2650 
2651 	rcu_read_lock_sched();
2652 	dl_b = dl_bw_of(cpu);
2653 	raw_spin_lock_irqsave(&dl_b->lock, flags);
2654 	cpus = dl_bw_cpus(cpu);
2655 	overflow = __dl_overflow(dl_b, cpus, 0, 0);
2656 	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2657 	rcu_read_unlock_sched();
2658 	return overflow;
2659 }
2660 #endif
2661 
2662 #ifdef CONFIG_SCHED_DEBUG
print_dl_stats(struct seq_file * m,int cpu)2663 void print_dl_stats(struct seq_file *m, int cpu)
2664 {
2665 	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2666 }
2667 #endif /* CONFIG_SCHED_DEBUG */
2668