
Lines Matching refs:rt_rq

136 void init_rt_rq(struct rt_rq *rt_rq)  in init_rt_rq()  argument
141 array = &rt_rq->active; in init_rt_rq()
150 rt_rq->highest_prio.curr = MAX_RT_PRIO-1; in init_rt_rq()
151 rt_rq->highest_prio.next = MAX_RT_PRIO-1; in init_rt_rq()
152 rt_rq->rt_nr_migratory = 0; in init_rt_rq()
153 rt_rq->overloaded = 0; in init_rt_rq()
154 plist_head_init(&rt_rq->pushable_tasks); in init_rt_rq()
157 rt_rq->rt_queued = 0; in init_rt_rq()
159 rt_rq->rt_time = 0; in init_rt_rq()
160 rt_rq->rt_throttled = 0; in init_rt_rq()
161 rt_rq->rt_runtime = 0; in init_rt_rq()
162 raw_spin_lock_init(&rt_rq->rt_runtime_lock); in init_rt_rq()
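
The matches above cover init_rt_rq(), which resets a queue's priority bookkeeping, migration/overload counters, and bandwidth accounting. A minimal user-space sketch of the same initialization, assuming a stripped-down stand-in for struct rt_rq (the real structure also holds the rt_prio_array, the pushable_tasks plist and group pointers, and its lock is a raw_spinlock_t):

#include <pthread.h>

#define MAX_RT_PRIO 100                 /* same value as the kernel's MAX_RT_PRIO */

/* Simplified stand-in for the kernel's struct rt_rq (assumption). */
struct rt_rq {
        struct { int curr, next; } highest_prio;
        unsigned int rt_nr_migratory;
        int overloaded;
        int rt_queued;

        unsigned long long rt_time;      /* runtime consumed in this period */
        int rt_throttled;
        unsigned long long rt_runtime;   /* runtime allowed in this period  */
        pthread_mutex_t rt_runtime_lock; /* stands in for the raw spinlock  */
};

static void init_rt_rq(struct rt_rq *rt_rq)
{
        /* Nothing queued yet: the "highest" priority is the lowest RT level. */
        rt_rq->highest_prio.curr = MAX_RT_PRIO - 1;
        rt_rq->highest_prio.next = MAX_RT_PRIO - 1;

        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
        rt_rq->rt_queued = 0;

        /* Bandwidth accounting starts empty and unthrottled. */
        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
        pthread_mutex_init(&rt_rq->rt_runtime_lock, NULL);
}
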
181 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq() argument
183 return rt_rq->rq; in rq_of_rt_rq()
186 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) in rt_rq_of_se()
188 return rt_se->rt_rq; in rt_rq_of_se()
193 struct rt_rq *rt_rq = rt_se->rt_rq; in rq_of_rt_se() local
195 return rt_rq->rq; in rq_of_rt_se()
210 if (tg->rt_rq) in free_rt_sched_group()
211 kfree(tg->rt_rq[i]); in free_rt_sched_group()
216 kfree(tg->rt_rq); in free_rt_sched_group()
220 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, in init_tg_rt_entry() argument
226 rt_rq->highest_prio.curr = MAX_RT_PRIO-1; in init_tg_rt_entry()
227 rt_rq->rt_nr_boosted = 0; in init_tg_rt_entry()
228 rt_rq->rq = rq; in init_tg_rt_entry()
229 rt_rq->tg = tg; in init_tg_rt_entry()
231 tg->rt_rq[cpu] = rt_rq; in init_tg_rt_entry()
238 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
240 rt_se->rt_rq = parent->my_q; in init_tg_rt_entry()
242 rt_se->my_q = rt_rq; in init_tg_rt_entry()
249 struct rt_rq *rt_rq; in alloc_rt_sched_group() local
253 tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL); in alloc_rt_sched_group()
254 if (!tg->rt_rq) in alloc_rt_sched_group()
264 rt_rq = kzalloc_node(sizeof(struct rt_rq), in alloc_rt_sched_group()
266 if (!rt_rq) in alloc_rt_sched_group()
274 init_rt_rq(rt_rq); in alloc_rt_sched_group()
275 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; in alloc_rt_sched_group()
276 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]); in alloc_rt_sched_group()
282 kfree(rt_rq); in alloc_rt_sched_group()
296 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq() argument
298 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
308 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) in rt_rq_of_se()
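
The listing shows two variants of rq_of_rt_rq(): with CONFIG_RT_GROUP_SCHED each group rt_rq carries an explicit ->rq back-pointer, while without group scheduling the only per-CPU rt_rq is the one embedded in struct rq, so container_of() recovers the enclosing runqueue from the member pointer. A self-contained sketch of the embedded case, with deliberately tiny stand-in structs:

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins (assumption): the real struct rq has many more fields. */
struct rt_rq { unsigned int rt_nr_running; };
struct rq    { int cpu; struct rt_rq rt; };

/* Same idea as the kernel's container_of(): recover the enclosing object
 * from a pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

int main(void)
{
        struct rq rq = { .cpu = 3 };

        /* Round-trips back to the embedding runqueue: prints "cpu = 3". */
        printf("cpu = %d\n", rq_of_rt_rq(&rq.rt)->cpu);
        return 0;
}
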
367 static void update_rt_migration(struct rt_rq *rt_rq) in update_rt_migration() argument
369 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) { in update_rt_migration()
370 if (!rt_rq->overloaded) { in update_rt_migration()
371 rt_set_overload(rq_of_rt_rq(rt_rq)); in update_rt_migration()
372 rt_rq->overloaded = 1; in update_rt_migration()
374 } else if (rt_rq->overloaded) { in update_rt_migration()
375 rt_clear_overload(rq_of_rt_rq(rt_rq)); in update_rt_migration()
376 rt_rq->overloaded = 0; in update_rt_migration()
380 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_migration() argument
388 rt_rq = &rq_of_rt_rq(rt_rq)->rt; in inc_rt_migration()
390 rt_rq->rt_nr_total++; in inc_rt_migration()
392 rt_rq->rt_nr_migratory++; in inc_rt_migration()
394 update_rt_migration(rt_rq); in inc_rt_migration()
397 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_migration() argument
405 rt_rq = &rq_of_rt_rq(rt_rq)->rt; in dec_rt_migration()
407 rt_rq->rt_nr_total--; in dec_rt_migration()
409 rt_rq->rt_nr_migratory--; in dec_rt_migration()
411 update_rt_migration(rt_rq); in dec_rt_migration()
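
update_rt_migration() marks a runqueue RT-overloaded when more than one RT task is queued and at least one of them may run elsewhere, and clears the mark otherwise; inc_rt_migration()/dec_rt_migration() maintain the two counters it consults. A hedged sketch of just that decision, with rt_set_overload()/rt_clear_overload() collapsed into a boolean field:

#include <stdbool.h>

/* Simplified stand-in (assumption): only the fields the decision needs. */
struct rt_rq {
        unsigned int rt_nr_total;        /* all queued RT tasks             */
        unsigned int rt_nr_migratory;    /* those allowed on other CPUs     */
        bool overloaded;                 /* mirrored into the root domain   */
};

static void update_rt_migration(struct rt_rq *rt_rq)
{
        /* Overloaded: more than one RT task and at least one can move away,
         * so another CPU could usefully pull work from this queue. */
        if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
                if (!rt_rq->overloaded)
                        rt_rq->overloaded = true;   /* kernel: rt_set_overload()   */
        } else if (rt_rq->overloaded) {
                rt_rq->overloaded = false;          /* kernel: rt_clear_overload() */
        }
}
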
474 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_migration() argument
479 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_migration() argument
488 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
489 static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
537 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) in sched_rt_runtime() argument
539 if (!rt_rq->tg) in sched_rt_runtime()
542 return rt_rq->rt_runtime; in sched_rt_runtime()
545 static inline u64 sched_rt_period(struct rt_rq *rt_rq) in sched_rt_period() argument
547 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); in sched_rt_period()
565 #define for_each_rt_rq(rt_rq, iter, rq) \ argument
568 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
573 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) in group_rt_rq()
581 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) in sched_rt_rq_enqueue() argument
583 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; in sched_rt_rq_enqueue()
584 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue()
589 rt_se = rt_rq->tg->rt_se[cpu]; in sched_rt_rq_enqueue()
591 if (rt_rq->rt_nr_running) { in sched_rt_rq_enqueue()
593 enqueue_top_rt_rq(rt_rq); in sched_rt_rq_enqueue()
597 if (rt_rq->highest_prio.curr < curr->prio) in sched_rt_rq_enqueue()
602 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) in sched_rt_rq_dequeue() argument
605 int cpu = cpu_of(rq_of_rt_rq(rt_rq)); in sched_rt_rq_dequeue()
607 rt_se = rt_rq->tg->rt_se[cpu]; in sched_rt_rq_dequeue()
610 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running); in sched_rt_rq_dequeue()
612 cpufreq_update_util(rq_of_rt_rq(rt_rq), 0); in sched_rt_rq_dequeue()
618 static inline int rt_rq_throttled(struct rt_rq *rt_rq) in rt_rq_throttled() argument
620 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted; in rt_rq_throttled()
625 struct rt_rq *rt_rq = group_rt_rq(rt_se); in rt_se_boosted() local
628 if (rt_rq) in rt_se_boosted()
629 return !!rt_rq->rt_nr_boosted; in rt_se_boosted()
648 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) in sched_rt_period_rt_rq()
650 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu]; in sched_rt_period_rt_rq()
653 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) in sched_rt_bandwidth() argument
655 return &rt_rq->tg->rt_bandwidth; in sched_rt_bandwidth()
660 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) in sched_rt_runtime() argument
662 return rt_rq->rt_runtime; in sched_rt_runtime()
665 static inline u64 sched_rt_period(struct rt_rq *rt_rq) in sched_rt_period() argument
670 typedef struct rt_rq *rt_rq_iter_t;
672 #define for_each_rt_rq(rt_rq, iter, rq) \ argument
673 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
678 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) in group_rt_rq()
683 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) in sched_rt_rq_enqueue() argument
685 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue()
687 if (!rt_rq->rt_nr_running) in sched_rt_rq_enqueue()
690 enqueue_top_rt_rq(rt_rq); in sched_rt_rq_enqueue()
694 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) in sched_rt_rq_dequeue() argument
696 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running); in sched_rt_rq_dequeue()
699 static inline int rt_rq_throttled(struct rt_rq *rt_rq) in rt_rq_throttled() argument
701 return rt_rq->rt_throttled; in rt_rq_throttled()
710 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) in sched_rt_period_rt_rq()
715 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) in sched_rt_bandwidth() argument
722 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq) in sched_rt_bandwidth_account() argument
724 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in sched_rt_bandwidth_account()
727 rt_rq->rt_time < rt_b->rt_runtime); in sched_rt_bandwidth_account()
734 static void do_balance_runtime(struct rt_rq *rt_rq) in do_balance_runtime() argument
736 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in do_balance_runtime()
737 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd; in do_balance_runtime()
746 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); in do_balance_runtime()
749 if (iter == rt_rq) in do_balance_runtime()
768 if (rt_rq->rt_runtime + diff > rt_period) in do_balance_runtime()
769 diff = rt_period - rt_rq->rt_runtime; in do_balance_runtime()
771 rt_rq->rt_runtime += diff; in do_balance_runtime()
772 if (rt_rq->rt_runtime == rt_period) { in do_balance_runtime()
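
do_balance_runtime() lets a starved rt_rq borrow unused runtime from the same group's queues on other CPUs, never letting the local allowance grow past one full period, as the capping lines above show. A rough, lock-free sketch of that arithmetic; the siblings array, nr_siblings and the simple weight are illustrative stand-ins for the root-domain span the kernel actually iterates:

#include <stdint.h>

struct rt_rq {
        uint64_t rt_time;       /* consumed this period  */
        uint64_t rt_runtime;    /* allowance this period */
};

/* Borrow spare runtime for *busy* from its siblings until it reaches a full
 * period. Assumption: locking and the RUNTIME_INF special case are omitted. */
static void do_balance_runtime(struct rt_rq *busy, struct rt_rq *siblings,
                               int nr_siblings, uint64_t rt_period)
{
        uint64_t weight = (uint64_t)nr_siblings + 1;

        for (int i = 0; i < nr_siblings; i++) {
                struct rt_rq *iter = &siblings[i];
                uint64_t spare;

                if (iter->rt_runtime <= iter->rt_time)
                        continue;                /* nothing spare on this CPU */

                /* Take only a proportional share of the spare time ... */
                spare = (iter->rt_runtime - iter->rt_time) / weight;

                /* ... and never grow past one full period locally. */
                if (busy->rt_runtime + spare > rt_period)
                        spare = rt_period - busy->rt_runtime;

                iter->rt_runtime -= spare;
                busy->rt_runtime += spare;
                if (busy->rt_runtime == rt_period)
                        break;
        }
}
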
790 struct rt_rq *rt_rq; in __disable_runtime() local
795 for_each_rt_rq(rt_rq, iter, rq) { in __disable_runtime()
796 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in __disable_runtime()
801 raw_spin_lock(&rt_rq->rt_runtime_lock); in __disable_runtime()
807 if (rt_rq->rt_runtime == RUNTIME_INF || in __disable_runtime()
808 rt_rq->rt_runtime == rt_b->rt_runtime) in __disable_runtime()
810 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __disable_runtime()
817 want = rt_b->rt_runtime - rt_rq->rt_runtime; in __disable_runtime()
823 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); in __disable_runtime()
829 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) in __disable_runtime()
847 raw_spin_lock(&rt_rq->rt_runtime_lock); in __disable_runtime()
858 rt_rq->rt_runtime = RUNTIME_INF; in __disable_runtime()
859 rt_rq->rt_throttled = 0; in __disable_runtime()
860 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __disable_runtime()
864 sched_rt_rq_enqueue(rt_rq); in __disable_runtime()
871 struct rt_rq *rt_rq; in __enable_runtime() local
879 for_each_rt_rq(rt_rq, iter, rq) { in __enable_runtime()
880 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in __enable_runtime()
883 raw_spin_lock(&rt_rq->rt_runtime_lock); in __enable_runtime()
884 rt_rq->rt_runtime = rt_b->rt_runtime; in __enable_runtime()
885 rt_rq->rt_time = 0; in __enable_runtime()
886 rt_rq->rt_throttled = 0; in __enable_runtime()
887 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __enable_runtime()
892 static void balance_runtime(struct rt_rq *rt_rq) in balance_runtime() argument
897 if (rt_rq->rt_time > rt_rq->rt_runtime) { in balance_runtime()
898 raw_spin_unlock(&rt_rq->rt_runtime_lock); in balance_runtime()
899 do_balance_runtime(rt_rq); in balance_runtime()
900 raw_spin_lock(&rt_rq->rt_runtime_lock); in balance_runtime()
904 static inline void balance_runtime(struct rt_rq *rt_rq) {} in balance_runtime() argument
928 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); in do_sched_rt_period_timer() local
929 struct rq *rq = rq_of_rt_rq(rt_rq); in do_sched_rt_period_timer()
937 raw_spin_lock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
938 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF) in do_sched_rt_period_timer()
939 rt_rq->rt_runtime = rt_b->rt_runtime; in do_sched_rt_period_timer()
940 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running; in do_sched_rt_period_timer()
941 raw_spin_unlock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
948 if (rt_rq->rt_time) { in do_sched_rt_period_timer()
951 raw_spin_lock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
952 if (rt_rq->rt_throttled) in do_sched_rt_period_timer()
953 balance_runtime(rt_rq); in do_sched_rt_period_timer()
954 runtime = rt_rq->rt_runtime; in do_sched_rt_period_timer()
955 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime); in do_sched_rt_period_timer()
956 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) { in do_sched_rt_period_timer()
957 rt_rq->rt_throttled = 0; in do_sched_rt_period_timer()
967 if (rt_rq->rt_nr_running && rq->curr == rq->idle) in do_sched_rt_period_timer()
970 if (rt_rq->rt_time || rt_rq->rt_nr_running) in do_sched_rt_period_timer()
972 raw_spin_unlock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
973 } else if (rt_rq->rt_nr_running) { in do_sched_rt_period_timer()
975 if (!rt_rq_throttled(rt_rq)) in do_sched_rt_period_timer()
978 if (rt_rq->rt_throttled) in do_sched_rt_period_timer()
982 sched_rt_rq_enqueue(rt_rq); in do_sched_rt_period_timer()
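
do_sched_rt_period_timer() runs when the bandwidth period timer fires: for every rt_rq of the group it pays back accumulated rt_time (one runtime's worth per elapsed period) and, once a throttled queue drops back under its allowance, clears rt_throttled so the queue can be re-enqueued. The core arithmetic, sketched with simplified fields and without the locking or RT_RUNTIME_SHARE rebalancing:

#include <stdint.h>
#include <stdbool.h>

struct rt_rq {
        uint64_t rt_time;        /* runtime consumed and not yet paid back */
        uint64_t rt_runtime;     /* allowance per period                   */
        bool     rt_throttled;
};

/* One replenishment for one rt_rq; "overrun" counts how many whole periods
 * elapsed since the last run. Returns true when the caller should
 * re-enqueue the queue (kernel: sched_rt_rq_enqueue()). */
static bool replenish(struct rt_rq *rt_rq, uint64_t overrun)
{
        uint64_t pay = overrun * rt_rq->rt_runtime;

        /* Pay back at most what was accumulated. */
        rt_rq->rt_time -= (rt_rq->rt_time < pay) ? rt_rq->rt_time : pay;

        if (rt_rq->rt_throttled && rt_rq->rt_time < rt_rq->rt_runtime) {
                rt_rq->rt_throttled = false;
                return true;
        }
        return false;
}
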
995 struct rt_rq *rt_rq = group_rt_rq(rt_se); in rt_se_prio() local
997 if (rt_rq) in rt_se_prio()
998 return rt_rq->highest_prio.curr; in rt_se_prio()
1004 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) in sched_rt_runtime_exceeded() argument
1006 u64 runtime = sched_rt_runtime(rt_rq); in sched_rt_runtime_exceeded()
1008 if (rt_rq->rt_throttled) in sched_rt_runtime_exceeded()
1009 return rt_rq_throttled(rt_rq); in sched_rt_runtime_exceeded()
1011 if (runtime >= sched_rt_period(rt_rq)) in sched_rt_runtime_exceeded()
1014 balance_runtime(rt_rq); in sched_rt_runtime_exceeded()
1015 runtime = sched_rt_runtime(rt_rq); in sched_rt_runtime_exceeded()
1019 if (rt_rq->rt_time > runtime) { in sched_rt_runtime_exceeded()
1020 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in sched_rt_runtime_exceeded()
1027 rt_rq->rt_throttled = 1; in sched_rt_runtime_exceeded()
1032 rq_clock(rq_of_rt_rq(rt_rq)), in sched_rt_runtime_exceeded()
1033 sched_rt_period(rt_rq), in sched_rt_runtime_exceeded()
1042 rt_rq->rt_time = 0; in sched_rt_runtime_exceeded()
1045 if (rt_rq_throttled(rt_rq)) { in sched_rt_runtime_exceeded()
1046 sched_rt_rq_dequeue(rt_rq); in sched_rt_runtime_exceeded()
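
sched_rt_runtime_exceeded() is the charging side's counterpart: once rt_time outruns the (possibly borrowed) runtime, the queue is throttled and dequeued, unless its runtime already covers the whole period. A compact sketch of that decision, assuming the borrowing step (balance_runtime()) and the PI-boosting exception are left out:

#include <stdint.h>
#include <stdbool.h>

struct rt_rq {
        uint64_t rt_time;
        uint64_t rt_runtime;
        uint64_t rt_period;
        bool     rt_throttled;
};

/* Returns true when the queue must be dequeued (throttled). */
static bool sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_throttled)
                return true;

        /* A full period of runtime means no throttling at all. */
        if (rt_rq->rt_runtime >= rt_rq->rt_period)
                return false;

        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                rt_rq->rt_throttled = true;
                return true;
        }
        return false;
}
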
1086 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in update_curr_rt() local
1089 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { in update_curr_rt()
1090 raw_spin_lock(&rt_rq->rt_runtime_lock); in update_curr_rt()
1091 rt_rq->rt_time += delta_exec; in update_curr_rt()
1092 exceeded = sched_rt_runtime_exceeded(rt_rq); in update_curr_rt()
1095 raw_spin_unlock(&rt_rq->rt_runtime_lock); in update_curr_rt()
1097 do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq)); in update_curr_rt()
1103 dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count) in dequeue_top_rt_rq() argument
1105 struct rq *rq = rq_of_rt_rq(rt_rq); in dequeue_top_rt_rq()
1107 BUG_ON(&rq->rt != rt_rq); in dequeue_top_rt_rq()
1109 if (!rt_rq->rt_queued) in dequeue_top_rt_rq()
1115 rt_rq->rt_queued = 0; in dequeue_top_rt_rq()
1120 enqueue_top_rt_rq(struct rt_rq *rt_rq) in enqueue_top_rt_rq() argument
1122 struct rq *rq = rq_of_rt_rq(rt_rq); in enqueue_top_rt_rq()
1124 BUG_ON(&rq->rt != rt_rq); in enqueue_top_rt_rq()
1126 if (rt_rq->rt_queued) in enqueue_top_rt_rq()
1129 if (rt_rq_throttled(rt_rq)) in enqueue_top_rt_rq()
1132 if (rt_rq->rt_nr_running) { in enqueue_top_rt_rq()
1133 add_nr_running(rq, rt_rq->rt_nr_running); in enqueue_top_rt_rq()
1134 rt_rq->rt_queued = 1; in enqueue_top_rt_rq()
1144 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) in inc_rt_prio_smp() argument
1146 struct rq *rq = rq_of_rt_rq(rt_rq); in inc_rt_prio_smp()
1152 if (&rq->rt != rt_rq) in inc_rt_prio_smp()
1160 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) in dec_rt_prio_smp() argument
1162 struct rq *rq = rq_of_rt_rq(rt_rq); in dec_rt_prio_smp()
1168 if (&rq->rt != rt_rq) in dec_rt_prio_smp()
1171 if (rq->online && rt_rq->highest_prio.curr != prev_prio) in dec_rt_prio_smp()
1172 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); in dec_rt_prio_smp()
1178 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} in inc_rt_prio_smp() argument
1180 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} in dec_rt_prio_smp() argument
1186 inc_rt_prio(struct rt_rq *rt_rq, int prio) in inc_rt_prio() argument
1188 int prev_prio = rt_rq->highest_prio.curr; in inc_rt_prio()
1191 rt_rq->highest_prio.curr = prio; in inc_rt_prio()
1193 inc_rt_prio_smp(rt_rq, prio, prev_prio); in inc_rt_prio()
1197 dec_rt_prio(struct rt_rq *rt_rq, int prio) in dec_rt_prio() argument
1199 int prev_prio = rt_rq->highest_prio.curr; in dec_rt_prio()
1201 if (rt_rq->rt_nr_running) { in dec_rt_prio()
1210 struct rt_prio_array *array = &rt_rq->active; in dec_rt_prio()
1212 rt_rq->highest_prio.curr = in dec_rt_prio()
1217 rt_rq->highest_prio.curr = MAX_RT_PRIO-1; in dec_rt_prio()
1220 dec_rt_prio_smp(rt_rq, prio, prev_prio); in dec_rt_prio()
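
inc_rt_prio()/dec_rt_prio() keep highest_prio.curr up to date. Raising it is a simple comparison; lowering it, when the departing priority was the current top, means rescanning the active array's priority bitmap (the kernel uses sched_find_first_bit()), after which the SMP helpers publish the new value via cpupri. A simplified sketch with a plain bit scan standing in for the bitmap search:

#include <stdint.h>

#define MAX_RT_PRIO 100

struct rt_rq {
        unsigned int rt_nr_running;
        uint64_t bitmap[(MAX_RT_PRIO + 63) / 64];   /* one bit per priority level */
        struct { int curr; } highest_prio;
};

/* Linear scan for the highest (numerically lowest) occupied priority. */
static int first_set_prio(struct rt_rq *rt_rq)
{
        for (int p = 0; p < MAX_RT_PRIO; p++)
                if (rt_rq->bitmap[p / 64] & (1ULL << (p % 64)))
                        return p;
        return MAX_RT_PRIO - 1;                     /* empty queue */
}

static void dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
        int prev = rt_rq->highest_prio.curr;

        if (rt_rq->rt_nr_running) {
                /* Only a departure at the current top can lower the top. */
                if (prio == prev)
                        rt_rq->highest_prio.curr = first_set_prio(rt_rq);
        } else {
                rt_rq->highest_prio.curr = MAX_RT_PRIO - 1;
        }
        /* kernel: dec_rt_prio_smp() would now update cpupri if the top changed */
}
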
1225 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {} in inc_rt_prio() argument
1226 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {} in dec_rt_prio() argument
1233 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_group() argument
1236 rt_rq->rt_nr_boosted++; in inc_rt_group()
1238 if (rt_rq->tg) in inc_rt_group()
1239 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); in inc_rt_group()
1243 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_group() argument
1246 rt_rq->rt_nr_boosted--; in dec_rt_group()
1248 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); in dec_rt_group()
1254 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_group() argument
1260 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {} in dec_rt_group() argument
1267 struct rt_rq *group_rq = group_rt_rq(rt_se); in rt_se_nr_running()
1278 struct rt_rq *group_rq = group_rt_rq(rt_se); in rt_se_rr_nr_running()
1290 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_tasks() argument
1295 rt_rq->rt_nr_running += rt_se_nr_running(rt_se); in inc_rt_tasks()
1296 rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se); in inc_rt_tasks()
1298 inc_rt_prio(rt_rq, prio); in inc_rt_tasks()
1299 inc_rt_migration(rt_se, rt_rq); in inc_rt_tasks()
1300 inc_rt_group(rt_se, rt_rq); in inc_rt_tasks()
1304 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_tasks() argument
1307 WARN_ON(!rt_rq->rt_nr_running); in dec_rt_tasks()
1308 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se); in dec_rt_tasks()
1309 rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se); in dec_rt_tasks()
1311 dec_rt_prio(rt_rq, rt_se_prio(rt_se)); in dec_rt_tasks()
1312 dec_rt_migration(rt_se, rt_rq); in dec_rt_tasks()
1313 dec_rt_group(rt_se, rt_rq); in dec_rt_tasks()
1352 update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) in update_stats_wait_start_rt() argument
1367 __update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats); in update_stats_wait_start_rt()
1371 update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) in update_stats_enqueue_sleeper_rt() argument
1386 __update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats); in update_stats_enqueue_sleeper_rt()
1390 update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, in update_stats_enqueue_rt() argument
1397 update_stats_enqueue_sleeper_rt(rt_rq, rt_se); in update_stats_enqueue_rt()
1401 update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) in update_stats_wait_end_rt() argument
1416 __update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats); in update_stats_wait_end_rt()
1420 update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, in update_stats_dequeue_rt() argument
1437 rq_clock(rq_of_rt_rq(rt_rq))); in update_stats_dequeue_rt()
1441 rq_clock(rq_of_rt_rq(rt_rq))); in update_stats_dequeue_rt()
1447 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in __enqueue_rt_entity() local
1448 struct rt_prio_array *array = &rt_rq->active; in __enqueue_rt_entity()
1449 struct rt_rq *group_rq = group_rt_rq(rt_se); in __enqueue_rt_entity()
1476 inc_rt_tasks(rt_se, rt_rq); in __enqueue_rt_entity()
1481 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in __dequeue_rt_entity() local
1482 struct rt_prio_array *array = &rt_rq->active; in __dequeue_rt_entity()
1490 dec_rt_tasks(rt_se, rt_rq); in __dequeue_rt_entity()
1538 struct rt_rq *rt_rq = group_rt_rq(rt_se); in dequeue_rt_entity() local
1540 if (rt_rq && rt_rq->rt_nr_running) in dequeue_rt_entity()
1604 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) in requeue_rt_entity() argument
1607 struct rt_prio_array *array = &rt_rq->active; in requeue_rt_entity()
1620 struct rt_rq *rt_rq; in requeue_task_rt() local
1623 rt_rq = rt_rq_of_se(rt_se); in requeue_task_rt()
1624 requeue_rt_entity(rt_rq, rt_se, head); in requeue_task_rt()
1837 struct rt_rq *rt_rq = &rq->rt; in set_next_task_rt() local
1841 update_stats_wait_end_rt(rt_rq, rt_se); in set_next_task_rt()
1861 static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq) in pick_next_rt_entity() argument
1863 struct rt_prio_array *array = &rt_rq->active; in pick_next_rt_entity()
1882 struct rt_rq *rt_rq = &rq->rt; in _pick_next_task_rt() local
1885 rt_se = pick_next_rt_entity(rt_rq); in _pick_next_task_rt()
1888 rt_rq = group_rt_rq(rt_se); in _pick_next_task_rt()
1889 } while (rt_rq); in _pick_next_task_rt()
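
With group scheduling, pick_next_rt_entity() can return a group entity rather than a task, so _pick_next_task_rt() keeps descending: while the picked entity owns a child rt_rq (group_rt_rq() is non-NULL) it picks again inside that child, stopping only at a task entity. A toy sketch of the descent, with an illustrative top pointer standing in for the real priority-array lookup:

#include <stddef.h>

struct rt_rq;

struct sched_rt_entity {
        struct rt_rq *my_q;             /* non-NULL only for a group entity */
};

struct rt_rq {
        struct sched_rt_entity *top;    /* stand-in: best queued entity */
};

/* Stand-in for pick_next_rt_entity(): best entity on this queue. */
static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
{
        return rt_rq->top;
}

static struct sched_rt_entity *pick_next_rt_se(struct rt_rq *root)
{
        struct rt_rq *rt_rq = root;
        struct sched_rt_entity *rt_se;

        /* Descend from the root rt_rq through group queues until the picked
         * entity no longer owns a child runqueue, i.e. it is a task. */
        do {
                rt_se = pick_next_rt_entity(rt_rq);
                rt_rq = rt_se ? rt_se->my_q : NULL;   /* group_rt_rq(rt_se) */
        } while (rt_rq);

        return rt_se;
}
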
1919 struct rt_rq *rt_rq = &rq->rt; in put_prev_task_rt() local
1922 update_stats_wait_start_rt(rt_rq, rt_se); in put_prev_task_rt()
2971 struct rt_rq *rt_rq = tg->rt_rq[i]; in tg_set_rt_bandwidth() local
2973 raw_spin_lock(&rt_rq->rt_runtime_lock); in tg_set_rt_bandwidth()
2974 rt_rq->rt_runtime = rt_runtime; in tg_set_rt_bandwidth()
2975 raw_spin_unlock(&rt_rq->rt_runtime_lock); in tg_set_rt_bandwidth()
3064 struct rt_rq *rt_rq = &cpu_rq(i)->rt; in sched_rt_global_constraints() local
3066 raw_spin_lock(&rt_rq->rt_runtime_lock); in sched_rt_global_constraints()
3067 rt_rq->rt_runtime = global_rt_runtime(); in sched_rt_global_constraints()
3068 raw_spin_unlock(&rt_rq->rt_runtime_lock); in sched_rt_global_constraints()
3168 struct rt_rq *rt_rq; in print_rt_stats() local
3171 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) in print_rt_stats()
3172 print_rt_rq(m, cpu, rt_rq); in print_rt_stats()