Lines Matching refs:rt_rq
66 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) in sched_rt_runtime() argument
68 if (!rt_rq->tg) in sched_rt_runtime()
71 return rt_rq->rt_runtime; in sched_rt_runtime()
74 static inline u64 sched_rt_period(struct rt_rq *rt_rq) in sched_rt_period() argument
76 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); in sched_rt_period()
79 #define for_each_leaf_rt_rq(rt_rq, rq) \ argument
80 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
82 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq() argument
84 return rt_rq->rq; in rq_of_rt_rq()
87 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) in rt_rq_of_se()
89 return rt_se->rt_rq; in rt_rq_of_se()
95 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) in group_rt_rq()
103 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) in sched_rt_rq_enqueue() argument
105 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; in sched_rt_rq_enqueue()
106 struct sched_rt_entity *rt_se = rt_rq->rt_se; in sched_rt_rq_enqueue()
108 if (rt_rq->rt_nr_running) { in sched_rt_rq_enqueue()
111 if (rt_rq->highest_prio < curr->prio) in sched_rt_rq_enqueue()
116 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) in sched_rt_rq_dequeue() argument
118 struct sched_rt_entity *rt_se = rt_rq->rt_se; in sched_rt_rq_dequeue()
124 static inline int rt_rq_throttled(struct rt_rq *rt_rq) in rt_rq_throttled() argument
126 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted; in rt_rq_throttled()
131 struct rt_rq *rt_rq = group_rt_rq(rt_se); in rt_se_boosted() local
134 if (rt_rq) in rt_se_boosted()
135 return !!rt_rq->rt_nr_boosted; in rt_se_boosted()
154 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) in sched_rt_period_rt_rq()
156 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu]; in sched_rt_period_rt_rq()
159 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) in sched_rt_bandwidth() argument
161 return &rt_rq->tg->rt_bandwidth; in sched_rt_bandwidth()
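Note: with CONFIG_RT_GROUP_SCHED, the accessors above read runtime and period from the task group's rt_bandwidth, and rt_rq_throttled() (line 124) only reports a throttle when no priority-boosted entities are queued, so priority inheritance is not defeated by bandwidth throttling. A minimal userspace sketch of that check; the struct layout is an illustrative stand-in, not the kernel's struct rt_rq:

/* Sketch of the group-scheduled throttle test shown above
 * (rt_throttled && !rt_nr_boosted). */
#include <stdio.h>

struct rt_rq_model {
	int rt_throttled;    /* runtime budget exhausted this period */
	int rt_nr_boosted;   /* entities boosted via priority inheritance */
};

static int rt_rq_throttled(const struct rt_rq_model *rt_rq)
{
	/* A boosted task must keep running even when the group is over
	 * budget, otherwise a lower-priority lock holder could block a
	 * high-priority waiter indefinitely. */
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

int main(void)
{
	struct rt_rq_model over_budget = { .rt_throttled = 1, .rt_nr_boosted = 0 };
	struct rt_rq_model boosted     = { .rt_throttled = 1, .rt_nr_boosted = 1 };

	printf("over budget, no boost -> throttled: %d\n", rt_rq_throttled(&over_budget));
	printf("over budget, boosted  -> throttled: %d\n", rt_rq_throttled(&boosted));
	return 0;
}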
166 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) in sched_rt_runtime() argument
168 return rt_rq->rt_runtime; in sched_rt_runtime()
171 static inline u64 sched_rt_period(struct rt_rq *rt_rq) in sched_rt_period() argument
176 #define for_each_leaf_rt_rq(rt_rq, rq) \ argument
177 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
179 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq() argument
181 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
184 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) in rt_rq_of_se()
195 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) in group_rt_rq()
200 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) in sched_rt_rq_enqueue() argument
202 if (rt_rq->rt_nr_running) in sched_rt_rq_enqueue()
203 resched_task(rq_of_rt_rq(rt_rq)->curr); in sched_rt_rq_enqueue()
206 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) in sched_rt_rq_dequeue() argument
210 static inline int rt_rq_throttled(struct rt_rq *rt_rq) in rt_rq_throttled() argument
212 return rt_rq->rt_throttled; in rt_rq_throttled()
221 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) in sched_rt_period_rt_rq()
226 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) in sched_rt_bandwidth() argument
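Note: without group scheduling there is exactly one rt_rq per runqueue, embedded in struct rq, so rq_of_rt_rq() is a container_of() and for_each_leaf_rt_rq() degenerates into a single-iteration loop (line 177). A compilable sketch of that pattern; the struct names are stand-ins for the kernel's:

/* Sketch of the single-rt_rq fallback: the rt_rq is embedded in the
 * runqueue, so the "iterate all leaf rt_rqs" macro visits it exactly once. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rt_rq_model { unsigned int rt_nr_running; };
struct rq_model    { struct rt_rq_model rt; };

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &(rq)->rt; rt_rq; rt_rq = NULL)

static struct rq_model *rq_of_rt_rq(struct rt_rq_model *rt_rq)
{
	return container_of(rt_rq, struct rq_model, rt);
}

int main(void)
{
	struct rq_model rq = { .rt = { .rt_nr_running = 2 } };
	struct rt_rq_model *it;

	for_each_leaf_rt_rq(it, &rq)	/* runs exactly once, then it becomes NULL */
		printf("leaf rt_rq with %u runners, owner rq found: %d\n",
		       it->rt_nr_running, rq_of_rt_rq(it) == &rq);
	return 0;
}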
237 static int do_balance_runtime(struct rt_rq *rt_rq) in do_balance_runtime() argument
239 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in do_balance_runtime()
249 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); in do_balance_runtime()
252 if (iter == rt_rq) in do_balance_runtime()
271 if (rt_rq->rt_runtime + diff > rt_period) in do_balance_runtime()
272 diff = rt_period - rt_rq->rt_runtime; in do_balance_runtime()
274 rt_rq->rt_runtime += diff; in do_balance_runtime()
276 if (rt_rq->rt_runtime == rt_period) { in do_balance_runtime()
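Note: do_balance_runtime() lets an over-budget rt_rq borrow unused runtime from sibling rt_rqs of the same bandwidth group, capped so the local rt_runtime never exceeds rt_period (lines 271-276). A minimal model of that borrowing rule; the fixed arrays, the missing locking and the kernel's weighting of the transfer are simplifications:

/* Sketch of the borrowing rule: pull a sibling's spare runtime
 * (budget not yet consumed), never growing the local budget past
 * the period. */
#include <stdint.h>
#include <stdio.h>

#define NCPUS 3

int main(void)
{
	uint64_t rt_period = 1000000;				/* ns, illustrative */
	uint64_t rt_runtime[NCPUS] = { 400000, 400000, 400000 };
	uint64_t rt_time[NCPUS]    = { 500000, 100000, 0 };	/* consumed so far */
	int this_cpu = 0;					/* over budget */

	for (int i = 0; i < NCPUS; i++) {
		if (i == this_cpu || rt_time[i] >= rt_runtime[i])
			continue;
		uint64_t diff = rt_runtime[i] - rt_time[i];	/* sibling's spare */
		if (rt_runtime[this_cpu] + diff > rt_period)	/* cap at the period */
			diff = rt_period - rt_runtime[this_cpu];
		rt_runtime[i]        -= diff;
		rt_runtime[this_cpu] += diff;
		if (rt_runtime[this_cpu] == rt_period)		/* cannot grow further */
			break;
	}
	printf("cpu0 budget after borrowing: %llu ns\n",
	       (unsigned long long)rt_runtime[this_cpu]);
	return 0;
}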
295 struct rt_rq *rt_rq; in __disable_runtime() local
300 for_each_leaf_rt_rq(rt_rq, rq) { in __disable_runtime()
301 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in __disable_runtime()
306 spin_lock(&rt_rq->rt_runtime_lock); in __disable_runtime()
312 if (rt_rq->rt_runtime == RUNTIME_INF || in __disable_runtime()
313 rt_rq->rt_runtime == rt_b->rt_runtime) in __disable_runtime()
315 spin_unlock(&rt_rq->rt_runtime_lock); in __disable_runtime()
322 want = rt_b->rt_runtime - rt_rq->rt_runtime; in __disable_runtime()
328 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); in __disable_runtime()
334 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) in __disable_runtime()
352 spin_lock(&rt_rq->rt_runtime_lock); in __disable_runtime()
363 rt_rq->rt_runtime = RUNTIME_INF; in __disable_runtime()
364 spin_unlock(&rt_rq->rt_runtime_lock); in __disable_runtime()
380 struct rt_rq *rt_rq; in __enable_runtime() local
388 for_each_leaf_rt_rq(rt_rq, rq) { in __enable_runtime()
389 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in __enable_runtime()
392 spin_lock(&rt_rq->rt_runtime_lock); in __enable_runtime()
393 rt_rq->rt_runtime = rt_b->rt_runtime; in __enable_runtime()
394 rt_rq->rt_time = 0; in __enable_runtime()
395 rt_rq->rt_throttled = 0; in __enable_runtime()
396 spin_unlock(&rt_rq->rt_runtime_lock); in __enable_runtime()
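Note: __disable_runtime() gives back whatever runtime was borrowed (want = rt_b->rt_runtime - rt_rq->rt_runtime, line 322) and then parks the rt_rq at RUNTIME_INF; __enable_runtime() undoes this when the CPU rejoins, restoring the configured budget and clearing rt_time and rt_throttled (lines 393-395). A compact sketch of that reset over an illustrative struct:

/* Sketch of the per-rt_rq reset done on the __enable_runtime() side:
 * restore the configured budget and forget accumulated time and
 * throttle state.  Struct layout is an illustrative stand-in. */
#include <stdint.h>

struct rt_bandwidth_model { uint64_t rt_runtime; };
struct rt_rq_model {
	uint64_t rt_runtime;	/* local budget, may have been lent away */
	uint64_t rt_time;	/* runtime consumed this period */
	int rt_throttled;
};

static void enable_runtime(struct rt_rq_model *rt_rq,
			   const struct rt_bandwidth_model *rt_b)
{
	rt_rq->rt_runtime   = rt_b->rt_runtime;	/* back to the configured value */
	rt_rq->rt_time      = 0;
	rt_rq->rt_throttled = 0;
}

int main(void)
{
	struct rt_bandwidth_model rt_b = { .rt_runtime = 950000000ULL };
	struct rt_rq_model rt_rq = { .rt_runtime = 0, .rt_time = 123, .rt_throttled = 1 };

	enable_runtime(&rt_rq, &rt_b);
	return rt_rq.rt_throttled;	/* 0: throttle cleared */
}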
410 static int balance_runtime(struct rt_rq *rt_rq) in balance_runtime() argument
414 if (rt_rq->rt_time > rt_rq->rt_runtime) { in balance_runtime()
415 spin_unlock(&rt_rq->rt_runtime_lock); in balance_runtime()
416 more = do_balance_runtime(rt_rq); in balance_runtime()
417 spin_lock(&rt_rq->rt_runtime_lock); in balance_runtime()
423 static inline int balance_runtime(struct rt_rq *rt_rq) in balance_runtime() argument
440 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); in do_sched_rt_period_timer() local
441 struct rq *rq = rq_of_rt_rq(rt_rq); in do_sched_rt_period_timer()
444 if (rt_rq->rt_time) { in do_sched_rt_period_timer()
447 spin_lock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
448 if (rt_rq->rt_throttled) in do_sched_rt_period_timer()
449 balance_runtime(rt_rq); in do_sched_rt_period_timer()
450 runtime = rt_rq->rt_runtime; in do_sched_rt_period_timer()
451 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime); in do_sched_rt_period_timer()
452 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) { in do_sched_rt_period_timer()
453 rt_rq->rt_throttled = 0; in do_sched_rt_period_timer()
456 if (rt_rq->rt_time || rt_rq->rt_nr_running) in do_sched_rt_period_timer()
458 spin_unlock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
459 } else if (rt_rq->rt_nr_running) in do_sched_rt_period_timer()
463 sched_rt_rq_enqueue(rt_rq); in do_sched_rt_period_timer()
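Note: do_sched_rt_period_timer() runs once per bandwidth period; for each CPU's rt_rq it subtracts up to overrun*runtime from rt_time (line 451), lifts the throttle once rt_time drops below the budget (lines 452-453), and re-enqueues the group so its tasks can run again (line 463). A standalone sketch of the replenishment arithmetic, with illustrative values:

/* Sketch of the per-period replenishment: rt_time is reduced by
 * overrun * runtime (never below zero) and the throttle is lifted
 * once the rt_rq is back under budget. */
#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	uint64_t runtime = 950000;	/* budget per period (ns) */
	uint64_t rt_time = 1400000;	/* consumed, already over budget */
	int rt_throttled = 1;
	int overrun = 1;		/* periods elapsed since the last update */

	rt_time -= min_u64(rt_time, (uint64_t)overrun * runtime);
	if (rt_throttled && rt_time < runtime) {
		rt_throttled = 0;	/* group becomes runnable again */
		printf("unthrottled, rt_time now %llu\n", (unsigned long long)rt_time);
	}
	return rt_throttled;
}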
473 struct rt_rq *rt_rq = group_rt_rq(rt_se); in rt_se_prio() local
475 if (rt_rq) in rt_se_prio()
476 return rt_rq->highest_prio; in rt_se_prio()
482 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) in sched_rt_runtime_exceeded() argument
484 u64 runtime = sched_rt_runtime(rt_rq); in sched_rt_runtime_exceeded()
486 if (rt_rq->rt_throttled) in sched_rt_runtime_exceeded()
487 return rt_rq_throttled(rt_rq); in sched_rt_runtime_exceeded()
489 if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq)) in sched_rt_runtime_exceeded()
492 balance_runtime(rt_rq); in sched_rt_runtime_exceeded()
493 runtime = sched_rt_runtime(rt_rq); in sched_rt_runtime_exceeded()
497 if (rt_rq->rt_time > runtime) { in sched_rt_runtime_exceeded()
498 rt_rq->rt_throttled = 1; in sched_rt_runtime_exceeded()
499 if (rt_rq_throttled(rt_rq)) { in sched_rt_runtime_exceeded()
500 sched_rt_rq_dequeue(rt_rq); in sched_rt_runtime_exceeded()
516 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in update_curr_rt() local
538 rt_rq = rt_rq_of_se(rt_se); in update_curr_rt()
540 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { in update_curr_rt()
541 spin_lock(&rt_rq->rt_runtime_lock); in update_curr_rt()
542 rt_rq->rt_time += delta_exec; in update_curr_rt()
543 if (sched_rt_runtime_exceeded(rt_rq)) in update_curr_rt()
545 spin_unlock(&rt_rq->rt_runtime_lock); in update_curr_rt()
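Note: update_curr_rt() charges the time the current RT task just ran (delta_exec) to each rt_rq on its hierarchy; sched_rt_runtime_exceeded() then marks the rt_rq throttled, dequeues it, and requests a reschedule once rt_time exceeds the (possibly rebalanced) budget, while runtime >= period means throttling never triggers. A self-contained sketch of that decision; RUNTIME_INF, the struct and the omitted rebalancing are illustrative simplifications:

/* Sketch of the accounting + throttle decision made in update_curr_rt()
 * and sched_rt_runtime_exceeded(). */
#include <stdint.h>
#include <stdio.h>

#define RUNTIME_INF ((uint64_t)~0ULL)

struct rt_rq_model {
	uint64_t rt_time;	/* ns consumed in the current period */
	uint64_t rt_runtime;	/* budget per period */
	uint64_t rt_period;	/* period length */
	int rt_throttled;
};

/* Returns 1 when the caller should dequeue the rt_rq and reschedule. */
static int charge_and_check(struct rt_rq_model *rt_rq, uint64_t delta_exec)
{
	if (rt_rq->rt_runtime == RUNTIME_INF)
		return 0;				/* no limit configured */

	rt_rq->rt_time += delta_exec;
	if (rt_rq->rt_runtime >= rt_rq->rt_period)
		return 0;				/* full period: never throttle */

	if (!rt_rq->rt_throttled && rt_rq->rt_time > rt_rq->rt_runtime)
		rt_rq->rt_throttled = 1;
	return rt_rq->rt_throttled;
}

int main(void)
{
	struct rt_rq_model rt_rq = {
		.rt_time = 900000, .rt_runtime = 950000, .rt_period = 1000000,
	};

	printf("after 40us: throttle=%d\n", charge_and_check(&rt_rq, 40000));
	printf("after another 40us: throttle=%d\n", charge_and_check(&rt_rq, 40000));
	return 0;
}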
551 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_tasks() argument
554 rt_rq->rt_nr_running++; in inc_rt_tasks()
556 if (rt_se_prio(rt_se) < rt_rq->highest_prio) { in inc_rt_tasks()
558 struct rq *rq = rq_of_rt_rq(rt_rq); in inc_rt_tasks()
561 rt_rq->highest_prio = rt_se_prio(rt_se); in inc_rt_tasks()
571 struct rq *rq = rq_of_rt_rq(rt_rq); in inc_rt_tasks()
576 update_rt_migration(rq_of_rt_rq(rt_rq)); in inc_rt_tasks()
580 rt_rq->rt_nr_boosted++; in inc_rt_tasks()
582 if (rt_rq->tg) in inc_rt_tasks()
583 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); in inc_rt_tasks()
590 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_tasks() argument
593 int highest_prio = rt_rq->highest_prio; in dec_rt_tasks()
597 WARN_ON(!rt_rq->rt_nr_running); in dec_rt_tasks()
598 rt_rq->rt_nr_running--; in dec_rt_tasks()
600 if (rt_rq->rt_nr_running) { in dec_rt_tasks()
603 WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio); in dec_rt_tasks()
604 if (rt_se_prio(rt_se) == rt_rq->highest_prio) { in dec_rt_tasks()
606 array = &rt_rq->active; in dec_rt_tasks()
607 rt_rq->highest_prio = in dec_rt_tasks()
611 rt_rq->highest_prio = MAX_RT_PRIO; in dec_rt_tasks()
615 struct rq *rq = rq_of_rt_rq(rt_rq); in dec_rt_tasks()
619 if (rt_rq->highest_prio != highest_prio) { in dec_rt_tasks()
620 struct rq *rq = rq_of_rt_rq(rt_rq); in dec_rt_tasks()
624 rt_rq->highest_prio); in dec_rt_tasks()
627 update_rt_migration(rq_of_rt_rq(rt_rq)); in dec_rt_tasks()
631 rt_rq->rt_nr_boosted--; in dec_rt_tasks()
633 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); in dec_rt_tasks()
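Note: inc_rt_tasks()/dec_rt_tasks() keep rt_nr_running and the cached highest_prio in step with the active priority array: enqueuing a higher-priority entity lowers highest_prio immediately (line 561), while dequeuing the current highest forces a rescan of the occupied priority levels (lines 604-607), falling back to MAX_RT_PRIO when the rt_rq empties (line 611). A small sketch of that recomputation; the helper and constant names are illustrative, not the kernel's rt_prio_array/sched_find_first_bit():

/* Sketch of recomputing highest_prio from a bitmap of occupied
 * priority levels after the top-priority entity leaves. */
#include <stdio.h>

#define MAX_RT_PRIO_MODEL 100

static int find_first_set_prio(const unsigned char occupied[MAX_RT_PRIO_MODEL])
{
	for (int prio = 0; prio < MAX_RT_PRIO_MODEL; prio++)	/* lower = higher priority */
		if (occupied[prio])
			return prio;
	return MAX_RT_PRIO_MODEL;	/* empty rt_rq: "no RT task" sentinel */
}

int main(void)
{
	unsigned char occupied[MAX_RT_PRIO_MODEL] = { 0 };
	int highest_prio;

	occupied[10] = 1;	/* one entity at prio 10 */
	occupied[40] = 1;	/* one entity at prio 40 */

	occupied[10] = 0;	/* the prio-10 entity is dequeued ... */
	highest_prio = find_first_set_prio(occupied);	/* ... so rescan */
	printf("new highest_prio: %d\n", highest_prio);	/* 40 */
	return 0;
}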
639 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in __enqueue_rt_entity() local
640 struct rt_prio_array *array = &rt_rq->active; in __enqueue_rt_entity()
641 struct rt_rq *group_rq = group_rt_rq(rt_se); in __enqueue_rt_entity()
656 inc_rt_tasks(rt_se, rt_rq); in __enqueue_rt_entity()
661 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in __dequeue_rt_entity() local
662 struct rt_prio_array *array = &rt_rq->active; in __dequeue_rt_entity()
668 dec_rt_tasks(rt_se, rt_rq); in __dequeue_rt_entity()
702 struct rt_rq *rt_rq = group_rt_rq(rt_se); in dequeue_rt_entity() local
704 if (rt_rq && rt_rq->rt_nr_running) in dequeue_rt_entity()
739 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) in requeue_rt_entity() argument
742 struct rt_prio_array *array = &rt_rq->active; in requeue_rt_entity()
755 struct rt_rq *rt_rq; in requeue_task_rt() local
758 rt_rq = rt_rq_of_se(rt_se); in requeue_task_rt()
759 requeue_rt_entity(rt_rq, rt_se, head); in requeue_task_rt()
865 struct rt_rq *rt_rq) in pick_next_rt_entity() argument
867 struct rt_prio_array *array = &rt_rq->active; in pick_next_rt_entity()
885 struct rt_rq *rt_rq; in pick_next_task_rt() local
887 rt_rq = &rq->rt; in pick_next_task_rt()
889 if (unlikely(!rt_rq->rt_nr_running)) in pick_next_task_rt()
892 if (rt_rq_throttled(rt_rq)) in pick_next_task_rt()
896 rt_se = pick_next_rt_entity(rq, rt_rq); in pick_next_task_rt()
898 rt_rq = group_rt_rq(rt_se); in pick_next_task_rt()
899 } while (rt_rq); in pick_next_task_rt()
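Note: pick_next_task_rt() starts at the root rt_rq, bails out if it is empty (line 889) or throttled (line 892), and otherwise repeatedly picks the highest-priority entity and descends into its group rt_rq until the chosen entity is a plain task (lines 896-899). A sketch of that descent over an illustrative node type; pick_highest() and the node layout are stand-ins, not the kernel's pick_next_rt_entity()/sched_rt_entity:

/* Sketch of the hierarchical pick loop: descend through group entities
 * until a leaf (task) entity is found. */
#include <stdio.h>

struct rt_entity_model {
	const char *name;
	struct rt_entity_model *group_best;	/* best child if a group, NULL if a task */
};

/* Stand-in for pick_next_rt_entity(): each group caches its best child here. */
static struct rt_entity_model *pick_highest(struct rt_entity_model *group)
{
	return group->group_best;
}

int main(void)
{
	struct rt_entity_model task  = { .name = "task",  .group_best = NULL };
	struct rt_entity_model group = { .name = "group", .group_best = &task };
	struct rt_entity_model root  = { .name = "root",  .group_best = &group };

	struct rt_entity_model *se = &root;
	do {
		se = pick_highest(se);		/* highest-priority entity at this level */
	} while (se->group_best);		/* keep descending while it is a group */

	printf("picked: %s\n", se->name);	/* "task" */
	return 0;
}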
934 struct rt_rq *rt_rq; in pick_next_highest_task_rt() local
937 for_each_leaf_rt_rq(rt_rq, rq) { in pick_next_highest_task_rt()
938 array = &rt_rq->active; in pick_next_highest_task_rt()
1547 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1551 struct rt_rq *rt_rq; in print_rt_stats() local
1554 for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu)) in print_rt_stats()
1555 print_rt_rq(m, cpu, rt_rq); in print_rt_stats()