Lines Matching refs:rt_bandwidth
17 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
19 struct rt_bandwidth def_rt_bandwidth;
23 struct rt_bandwidth *rt_b = in sched_rt_period_timer()
24 container_of(timer, struct rt_bandwidth, rt_period_timer); in sched_rt_period_timer()
45 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) in init_rt_bandwidth()
57 static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b) in do_start_rt_bandwidth()
77 static void start_rt_bandwidth(struct rt_bandwidth *rt_b) in start_rt_bandwidth()
115 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b) in destroy_rt_bandwidth()
150 destroy_rt_bandwidth(&tg->rt_bandwidth); in unregister_rt_sched_group()
209 init_rt_bandwidth(&tg->rt_bandwidth, in alloc_rt_sched_group()
224 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; in alloc_rt_sched_group()
507 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); in sched_rt_period()
608 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) in sched_rt_period_rt_rq()
610 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu]; in sched_rt_period_rt_rq()
613 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) in sched_rt_bandwidth()
615 return &rt_rq->tg->rt_bandwidth; in sched_rt_bandwidth()
670 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) in sched_rt_period_rt_rq()
675 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) in sched_rt_bandwidth()
684 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in sched_rt_bandwidth_account()
696 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in do_balance_runtime()
756 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in __disable_runtime()
840 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in __enable_runtime()
867 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) in do_sched_rt_period_timer()
883 if (rt_b == &root_task_group.rt_bandwidth) in do_sched_rt_period_timer()
980 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in sched_rt_runtime_exceeded()
1201 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); in inc_rt_group()
2709 period = ktime_to_ns(tg->rt_bandwidth.rt_period); in tg_rt_schedulable()
2710 runtime = tg->rt_bandwidth.rt_runtime; in tg_rt_schedulable()
2727 tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg)) in tg_rt_schedulable()
2742 period = ktime_to_ns(child->rt_bandwidth.rt_period); in tg_rt_schedulable()
2743 runtime = child->rt_bandwidth.rt_runtime; in tg_rt_schedulable()
2803 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); in tg_set_rt_bandwidth()
2804 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); in tg_set_rt_bandwidth()
2805 tg->rt_bandwidth.rt_runtime = rt_runtime; in tg_set_rt_bandwidth()
2814 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); in tg_set_rt_bandwidth()
2825 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); in sched_group_set_rt_runtime()
2839 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) in sched_group_rt_runtime()
2842 rt_runtime_us = tg->rt_bandwidth.rt_runtime; in sched_group_rt_runtime()
2855 rt_runtime = tg->rt_bandwidth.rt_runtime; in sched_group_set_rt_period()
2864 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); in sched_group_rt_period()
2883 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) in sched_rt_can_attach()
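Read together, the matches above outline both the shape of struct rt_bandwidth and the locking pattern used when a group's period or runtime changes: tg_set_rt_bandwidth() writes rt_period and rt_runtime under rt_runtime_lock (lines 2803-2814), and each per-CPU rt_rq caches the group's rt_runtime (line 224). The sketch below restates that shape in one place. It is inferred from these call sites only, not copied from the kernel tree: the hrtimer type of rt_period_timer, the per-rq rt_runtime_lock taken in the inner loop, and the helper name sketch_set_rt_bandwidth() are assumptions made for illustration.

/*
 * Sketch only (kernel context assumed: raw_spinlock_t, ktime_t and
 * struct hrtimer come from the usual scheduler headers). The field
 * set is inferred from the matches above; fields not referenced
 * there are left out.
 */
struct rt_bandwidth {
	raw_spinlock_t	rt_runtime_lock;	/* guards period/runtime updates (lines 2803/2814) */
	ktime_t		rt_period;		/* replenishment period (ktime_to_ns/ns_to_ktime call sites) */
	u64		rt_runtime;		/* budget per period in ns; RUNTIME_INF means unlimited (line 2839) */
	struct hrtimer	rt_period_timer;	/* assumed hrtimer; sched_rt_period_timer() does container_of() on it (line 24) */
};

/*
 * Hypothetical helper mirroring the update pattern visible at lines
 * 2803-2814 and 224: change the group-wide values under the bandwidth
 * lock, then push the new runtime down to every per-CPU rt_rq. The
 * inner rt_rq->rt_runtime_lock is an assumption not shown in the
 * matches above.
 */
static void sketch_set_rt_bandwidth(struct task_group *tg,
				     u64 rt_period_ns, u64 rt_runtime_ns)
{
	int i;

	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
	tg->rt_bandwidth.rt_period  = ns_to_ktime(rt_period_ns);
	tg->rt_bandwidth.rt_runtime = rt_runtime_ns;

	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = tg->rt_rq[i];

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_runtime_ns;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
}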