Lines matching refs:rq in kernel/sched/pelt.h
  7  int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
  8  int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
 11  int update_irq_load_avg(struct rq *rq, u64 running);
 14  update_irq_load_avg(struct rq *rq, u64 running)
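These are the PELT entry points for the rt, dl, and irq utilization signals (line 14 is the signature of the no-op fallback built when irq time accounting is disabled). A hedged usage sketch of how callers typically drive them, modeled on update_blocked_averages() in kernel/sched/fair.c; this call site is an assumption from context, not one of the matched lines:

        const struct sched_class *curr_class = rq->curr->sched_class;

        /* `now` is the scaled PELT clock, and `running` flags whether
         * that class currently owns the CPU so its signal keeps accruing. */
        update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
        update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);

update_irq_load_avg() is driven from the irq/steal time accounting path instead and is handed the raw running delta directly.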
 58  static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
 60          if (unlikely(is_idle_task(rq->curr))) {
 62                  rq->clock_pelt = rq_clock_task(rq);
 82          delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
 83          delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));
 85          rq->clock_pelt += delta;
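The two cap_scale() lines make the PELT clock advance in proportion to the compute actually delivered: the running delta is scaled once by CPU capacity and once by current frequency, so utilization is comparable across big and little cores at any OPP. A minimal userspace sketch of the same fixed-point arithmetic, assuming the usual SCHED_CAPACITY_SHIFT of 10 (1024 == full capacity); the numbers are invented for illustration:

        #include <stdint.h>
        #include <stdio.h>

        #define SCHED_CAPACITY_SHIFT 10
        #define cap_scale(v, s) (((v) * (s)) >> SCHED_CAPACITY_SHIFT)

        int main(void)
        {
                int64_t delta = 4000000;   /* 4 ms of task clock, in ns */
                int64_t cpu_cap = 512;     /* little core at half max capacity */
                int64_t freq_cap = 768;    /* running at 75% of max frequency */

                /* Same double scaling as update_rq_clock_pelt(). */
                delta = cap_scale(delta, cpu_cap);
                delta = cap_scale(delta, freq_cap);

                /* 4 ms * 0.5 * 0.75 = 1.5 ms of capacity-invariant time. */
                printf("scaled delta = %lld ns\n", (long long)delta);
                return 0;
        }

So 4 ms of wall-clock runtime on a half-capacity core at 75% frequency contributes only 1.5 ms of PELT time, which is what keeps the resulting load/utilization averages invariant across CPUs.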
 97  static inline void update_idle_rq_clock_pelt(struct rq *rq)
100          u32 util_sum = rq->cfs.avg.util_sum;
101          util_sum += rq->avg_rt.util_sum;
102          util_sum += rq->avg_dl.util_sum;
114                  rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
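Because clock_pelt lags clock_task on a capacity- or frequency-limited CPU, syncing the two when the rq goes idle would inject phantom idle time into a rq that was in fact saturated. The lines elided from this match (103-113) guard the accrual: only when the summed util_sum indicates the rq was effectively always running is the gap banked in lost_idle_time. A toy model of that bookkeeping, with invented numbers; this is an assumption-labeled sketch, not kernel code:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t clock_task = 8000000;  /* 8 ms of task clock, in ns */
                uint64_t clock_pelt = 6000000;  /* lagged by capacity/freq scaling */
                uint64_t lost_idle_time = 0;

                /* rq goes idle while saturated: bank the deficit, as
                 * update_idle_rq_clock_pelt() does on line 114. */
                lost_idle_time += clock_task - clock_pelt;

                /* The idle path then syncs the PELT clock forward. */
                clock_pelt = clock_task;

                /* rq_clock_pelt() reads the net value: back to 6 ms, as
                 * if the saturated rq had never been "caught up". */
                printf("pelt now = %llu ns\n",
                       (unsigned long long)(clock_pelt - lost_idle_time));
                return 0;
        }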
117  static inline u64 rq_clock_pelt(struct rq *rq)
119          lockdep_assert_held(&rq->lock);
120          assert_clock_updated(rq);
122          return rq->clock_pelt - rq->lost_idle_time;
150  update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
156  update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
162  update_irq_load_avg(struct rq *rq, u64 running)
167  static inline u64 rq_clock_pelt(struct rq *rq)
169          return rq_clock_task(rq);
173  update_rq_clock_pelt(struct rq *rq, s64 delta) { }
176  update_idle_rq_clock_pelt(struct rq *rq) { }
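These trailing no-op variants are the fallbacks compiled when the corresponding tracking is not built in, so that callers need no #ifdefs of their own: the update helpers return 0 and rq_clock_pelt() degrades to rq_clock_task(). The exact config guards are not part of the matched lines; a hedged sketch of the usual selection pattern, with the guard name assumed from context:

        /* Sketch only: config guard assumed, not shown in the match. */
        #ifdef CONFIG_SMP
        int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
        #else
        static inline int
        update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
        {
                return 0;
        }
        #endif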