Lines matching refs: rq (kernel/sched/stats.h)
9 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) in rq_sched_info_arrive() argument
11 if (rq) { in rq_sched_info_arrive()
12 rq->rq_sched_info.run_delay += delta; in rq_sched_info_arrive()
13 rq->rq_sched_info.pcount++; in rq_sched_info_arrive()
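For context, the matches at lines 9-13 belong to the per-runqueue arrival accounting helper under CONFIG_SCHEDSTATS. A sketch of the whole function as it likely reads in kernel/sched/stats.h of this era; only the rq-matching lines above are confirmed by the listing, the surrounding comment and braces are reconstructed and should be checked against the exact tree:

/*
 * Expects the runqueue lock to be held for atomicity of the update.
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
        if (rq) {
                rq->rq_sched_info.run_delay += delta;   /* time the task spent waiting to run */
                rq->rq_sched_info.pcount++;             /* number of arrivals on this CPU */
        }
}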
21 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument
23 if (rq) in rq_sched_info_depart()
24 rq->rq_cpu_time += delta; in rq_sched_info_depart()
28 rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) in rq_sched_info_dequeued() argument
30 if (rq) in rq_sched_info_dequeued()
31 rq->rq_sched_info.run_delay += delta; in rq_sched_info_dequeued()
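Lines 21-31 are the two sibling helpers: rq_sched_info_depart() charges on-CPU time to the runqueue, and rq_sched_info_dequeued() folds a task's residual wait time back into run_delay when it is taken off the queue. Sketched in full under the same caveat (non-matching lines reconstructed):

static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_cpu_time += delta;       /* cumulative time tasks ran on this rq */
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_sched_info.run_delay += delta;   /* wait time of the task being dequeued */
}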
44 static inline void rq_sched_info_arrive (struct rq *rq, unsigned long long delta) { } in rq_sched_info_arrive() argument
45 static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { } in rq_sched_info_dequeued() argument
46 static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delta) { } in rq_sched_info_depart() argument
114 struct rq *rq; in psi_ttwu_dequeue() local
122 rq = __task_rq_lock(p, &rf); in psi_ttwu_dequeue()
125 __task_rq_unlock(rq, &rf); in psi_ttwu_dequeue()
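Lines 114-125 fall inside psi_ttwu_dequeue(), which runs when a task is migrated at wakeup: it takes the old runqueue lock, clears the task's sleep-persistent PSI states (iowait, memstall) from that CPU, and flags the task so psi_enqueue() requeues it on the new one. The sketch below is mostly reconstruction; identifiers such as TSK_IOWAIT, TSK_MEMSTALL, psi_task_change() and sched_psi_wake_requeue are recalled from the PSI code of this era, not from the listing, so verify against the tree:

/* sketch: only the struct rq declaration and the lock/unlock lines are confirmed matches */
static inline void psi_ttwu_dequeue(struct task_struct *p)
{
        if (static_branch_likely(&psi_disabled))
                return;
        /*
         * Is the task being migrated during a wakeup? Deregister its
         * sleep-persistent psi states from the old queue and let
         * psi_enqueue() know it has to requeue on the new one.
         */
        if (unlikely(p->in_iowait || (p->flags & PF_MEMSTALL))) {
                struct rq_flags rf;
                struct rq *rq;
                int clear = 0;

                if (p->in_iowait)
                        clear |= TSK_IOWAIT;
                if (p->flags & PF_MEMSTALL)
                        clear |= TSK_MEMSTALL;

                rq = __task_rq_lock(p, &rf);
                psi_task_change(p, clear, 0);
                p->sched_psi_wake_requeue = 1;
                __task_rq_unlock(rq, &rf);
        }
}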
129 static inline void psi_task_tick(struct rq *rq) in psi_task_tick() argument
134 if (unlikely(rq->curr->flags & PF_MEMSTALL)) in psi_task_tick()
135 psi_memstall_tick(rq->curr, cpu_of(rq)); in psi_task_tick()
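Lines 129-135 are psi_task_tick(), called from the scheduler tick to keep per-CPU memstall time accruing while the current task has PF_MEMSTALL set. Sketch; the psi_disabled early return is reconstructed:

static inline void psi_task_tick(struct rq *rq)
{
        if (static_branch_likely(&psi_disabled))
                return;

        if (unlikely(rq->curr->flags & PF_MEMSTALL))
                psi_memstall_tick(rq->curr, cpu_of(rq));
}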
141 static inline void psi_task_tick(struct rq *rq) {} in psi_task_tick() argument
156 static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t) in sched_info_dequeued() argument
158 unsigned long long now = rq_clock(rq), delta = 0; in sched_info_dequeued()
167 rq_sched_info_dequeued(rq, delta); in sched_info_dequeued()
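Lines 156-167 belong to sched_info_dequeued(): when a task leaves the runqueue without having run, the time since last_queued is charged to its run_delay and forwarded to the per-rq counter. Sketch with the non-matching lines (sched_info_on() gate, reset, run_delay update) reconstructed:

static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (unlikely(sched_info_on()))
                if (t->sched_info.last_queued)
                        delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);           /* clears last_queued */
        t->sched_info.run_delay += delta;

        rq_sched_info_dequeued(rq, delta);
}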
175 static void sched_info_arrive(struct rq *rq, struct task_struct *t) in sched_info_arrive() argument
177 unsigned long long now = rq_clock(rq), delta = 0; in sched_info_arrive()
186 rq_sched_info_arrive(rq, delta); in sched_info_arrive()
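Lines 175-186 are sched_info_arrive(), run when a task finally gets the CPU: the queued-to-running delay is added to run_delay, last_arrival is stamped for the later depart calculation, and the per-rq arrival counter is bumped. Sketch under the same caveat:

static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (t->sched_info.last_queued)
                delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;
        t->sched_info.last_arrival = now;       /* used by sched_info_depart() */
        t->sched_info.pcount++;

        rq_sched_info_arrive(rq, delta);
}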
194 static inline void sched_info_queued(struct rq *rq, struct task_struct *t) in sched_info_queued() argument
198 t->sched_info.last_queued = rq_clock(rq); in sched_info_queued()
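Lines 194-198 are sched_info_queued(), which only stamps last_queued when it is not already set, so a task requeued several times keeps its earliest wait start. Sketch; the sched_info_on() and !last_queued checks are reconstructed:

static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
        if (unlikely(sched_info_on()))
                if (!t->sched_info.last_queued)
                        t->sched_info.last_queued = rq_clock(rq);
}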
210 static inline void sched_info_depart(struct rq *rq, struct task_struct *t) in sched_info_depart() argument
212 unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival; in sched_info_depart()
214 rq_sched_info_depart(rq, delta); in sched_info_depart()
217 sched_info_queued(rq, t); in sched_info_depart()
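Lines 210-217 are sched_info_depart(): the on-CPU time since last_arrival is charged to the runqueue, and if the task is still runnable (preempted rather than sleeping) it is immediately marked queued again. Sketch; the TASK_RUNNING check is reconstructed from the stats.h of this era:

static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
        unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

        rq_sched_info_depart(rq, delta);

        if (t->state == TASK_RUNNING)
                sched_info_queued(rq, t);
}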
226 __sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) in __sched_info_switch() argument
233 if (prev != rq->idle) in __sched_info_switch()
234 sched_info_depart(rq, prev); in __sched_info_switch()
236 if (next != rq->idle) in __sched_info_switch()
237 sched_info_arrive(rq, next); in __sched_info_switch()
241 sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) in sched_info_switch() argument
244 __sched_info_switch(rq, prev, next); in sched_info_switch()
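Lines 226-244 cover the context-switch pair: __sched_info_switch() skips the idle task on both sides and otherwise records the departure of prev and the arrival of next, while sched_info_switch() is the sched_info_on()-gated wrapper the scheduler calls. Sketch; braces and the wrapper's gate are reconstructed:

static inline void
__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
        /*
         * Stats about scheduling the idle task are not interesting,
         * so skip it on both the depart and the arrive side.
         */
        if (prev != rq->idle)
                sched_info_depart(rq, prev);

        if (next != rq->idle)
                sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
        if (unlikely(sched_info_on()))
                __sched_info_switch(rq, prev, next);
}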
248 # define sched_info_queued(rq, t) do { } while (0) argument
250 # define sched_info_dequeued(rq, t) do { } while (0) argument
251 # define sched_info_depart(rq, t) do { } while (0) argument
252 # define sched_info_arrive(rq, next) do { } while (0) argument
253 # define sched_info_switch(rq, t, next) do { } while (0) argument