Lines matching refs:rq. Each entry gives a source line number in the scheduler's internal runqueue header (kernel/sched/sched.h) followed by the line that references the runqueue identifier rq, plus the enclosing function and the kind of reference where the indexing tool recorded them.
84 struct rq;
96 extern void calc_global_load_tick(struct rq *this_rq);
97 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
556 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ member
625 struct rq *rq; member
801 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
861 struct rq { struct
1021 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() argument
1023 return cfs_rq->rq; in rq_of()
1028 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of()
1030 return container_of(cfs_rq, struct rq, cfs); in rq_of()
1034 static inline int cpu_of(struct rq *rq) in cpu_of() argument
1037 return rq->cpu; in cpu_of()
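
rq_of() and cpu_of() above map a cfs_rq back to its owning runqueue and CPU. A minimal caller-side sketch, assuming kernel/sched context; the cfs_rq_cpu() helper name is hypothetical, not part of the listing:

static inline int cfs_rq_cpu(struct cfs_rq *cfs_rq)
{
        /* resolve the CPU that this cfs_rq's runqueue belongs to */
        return cpu_of(rq_of(cfs_rq));
}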
1045 extern void __update_idle_core(struct rq *rq);
1047 static inline void update_idle_core(struct rq *rq) in update_idle_core() argument
1050 __update_idle_core(rq); in update_idle_core()
1054 static inline void update_idle_core(struct rq *rq) { } in update_idle_core() argument
1057 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1065 extern void update_rq_clock(struct rq *rq);
1067 static inline u64 __rq_clock_broken(struct rq *rq) in __rq_clock_broken() argument
1069 return READ_ONCE(rq->clock); in __rq_clock_broken()
1099 static inline void assert_clock_updated(struct rq *rq) in assert_clock_updated() argument
1105 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); in assert_clock_updated()
1108 static inline u64 rq_clock(struct rq *rq) in rq_clock() argument
1110 lockdep_assert_held(&rq->lock); in rq_clock()
1111 assert_clock_updated(rq); in rq_clock()
1113 return rq->clock; in rq_clock()
1116 static inline u64 rq_clock_task(struct rq *rq) in rq_clock_task() argument
1118 lockdep_assert_held(&rq->lock); in rq_clock_task()
1119 assert_clock_updated(rq); in rq_clock_task()
1121 return rq->clock_task; in rq_clock_task()
1124 static inline void rq_clock_skip_update(struct rq *rq) in rq_clock_skip_update() argument
1126 lockdep_assert_held(&rq->lock); in rq_clock_skip_update()
1127 rq->clock_update_flags |= RQCF_REQ_SKIP; in rq_clock_skip_update()
1134 static inline void rq_clock_cancel_skipupdate(struct rq *rq) in rq_clock_cancel_skipupdate() argument
1136 lockdep_assert_held(&rq->lock); in rq_clock_cancel_skipupdate()
1137 rq->clock_update_flags &= ~RQCF_REQ_SKIP; in rq_clock_cancel_skipupdate()
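
rq_clock() and rq_clock_task() insist that rq->lock is held and that the clock was refreshed within the current critical section. A hedged sketch of the expected calling pattern; read_rq_now() is an illustrative name, not a kernel API:

static u64 read_rq_now(struct rq *rq)
{
        lockdep_assert_held(&rq->lock);
        update_rq_clock(rq);    /* refresh rq->clock so assert_clock_updated() is satisfied */
        return rq_clock(rq);    /* runqueue clock, in nanoseconds */
}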
1153 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) in rq_pin_lock() argument
1155 rf->cookie = lockdep_pin_lock(&rq->lock); in rq_pin_lock()
1158 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); in rq_pin_lock()
1163 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) in rq_unpin_lock() argument
1166 if (rq->clock_update_flags > RQCF_ACT_SKIP) in rq_unpin_lock()
1170 lockdep_unpin_lock(&rq->lock, rf->cookie); in rq_unpin_lock()
1173 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) in rq_repin_lock() argument
1175 lockdep_repin_lock(&rq->lock, rf->cookie); in rq_repin_lock()
1181 rq->clock_update_flags |= rf->clock_update_flags; in rq_repin_lock()
1185 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1186 __acquires(rq->lock);
1188 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1190 __acquires(rq->lock);
1192 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) in __task_rq_unlock() argument
1193 __releases(rq->lock) in __task_rq_unlock()
1195 rq_unpin_lock(rq, rf); in __task_rq_unlock()
1196 raw_spin_unlock(&rq->lock); in __task_rq_unlock()
1200 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in task_rq_unlock() argument
1201 __releases(rq->lock) in task_rq_unlock()
1204 rq_unpin_lock(rq, rf); in task_rq_unlock()
1205 raw_spin_unlock(&rq->lock); in task_rq_unlock()
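
task_rq_lock() returns the task's runqueue with its lock held and pinned via rq_pin_lock(); task_rq_unlock() unpins and releases it. A usage sketch; inspect_task_rq() is a hypothetical caller:

static void inspect_task_rq(struct task_struct *p)
{
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);
        /* p's runqueue is stable here: examine or adjust p under rq->lock */
        task_rq_unlock(rq, p, &rf);
}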
1210 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) in rq_lock_irqsave() argument
1211 __acquires(rq->lock) in rq_lock_irqsave()
1213 raw_spin_lock_irqsave(&rq->lock, rf->flags); in rq_lock_irqsave()
1214 rq_pin_lock(rq, rf); in rq_lock_irqsave()
1218 rq_lock_irq(struct rq *rq, struct rq_flags *rf) in rq_lock_irq() argument
1219 __acquires(rq->lock) in rq_lock_irq()
1221 raw_spin_lock_irq(&rq->lock); in rq_lock_irq()
1222 rq_pin_lock(rq, rf); in rq_lock_irq()
1226 rq_lock(struct rq *rq, struct rq_flags *rf) in rq_lock() argument
1227 __acquires(rq->lock) in rq_lock()
1229 raw_spin_lock(&rq->lock); in rq_lock()
1230 rq_pin_lock(rq, rf); in rq_lock()
1234 rq_relock(struct rq *rq, struct rq_flags *rf) in rq_relock() argument
1235 __acquires(rq->lock) in rq_relock()
1237 raw_spin_lock(&rq->lock); in rq_relock()
1238 rq_repin_lock(rq, rf); in rq_relock()
1242 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) in rq_unlock_irqrestore() argument
1243 __releases(rq->lock) in rq_unlock_irqrestore()
1245 rq_unpin_lock(rq, rf); in rq_unlock_irqrestore()
1246 raw_spin_unlock_irqrestore(&rq->lock, rf->flags); in rq_unlock_irqrestore()
1250 rq_unlock_irq(struct rq *rq, struct rq_flags *rf) in rq_unlock_irq() argument
1251 __releases(rq->lock) in rq_unlock_irq()
1253 rq_unpin_lock(rq, rf); in rq_unlock_irq()
1254 raw_spin_unlock_irq(&rq->lock); in rq_unlock_irq()
1258 rq_unlock(struct rq *rq, struct rq_flags *rf) in rq_unlock() argument
1259 __releases(rq->lock) in rq_unlock()
1261 rq_unpin_lock(rq, rf); in rq_unlock()
1262 raw_spin_unlock(&rq->lock); in rq_unlock()
1265 static inline struct rq *
1267 __acquires(rq->lock) in this_rq_lock_irq()
1269 struct rq *rq; in this_rq_lock_irq() local
1272 rq = this_rq(); in this_rq_lock_irq()
1273 rq_lock(rq, rf); in this_rq_lock_irq()
1274 return rq; in this_rq_lock_irq()
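
The rq_lock*()/rq_unlock*() wrappers pair the raw rq->lock operations with rq_pin_lock()/rq_unpin_lock(). A sketch of the irqsave variant on another CPU's runqueue; poke_cpu_rq() is an illustrative name, and cpu_rq() is the per-CPU runqueue accessor also used at line 2257 of this listing:

static void poke_cpu_rq(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        struct rq_flags rf;

        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
        /* ... modify rq state under the pinned lock ... */
        rq_unlock_irqrestore(rq, &rf);
}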
1323 queue_balance_callback(struct rq *rq, in queue_balance_callback() argument
1325 void (*func)(struct rq *rq)) in queue_balance_callback() argument
1327 lockdep_assert_held(&rq->lock); in queue_balance_callback()
1333 head->next = rq->balance_callback; in queue_balance_callback()
1334 rq->balance_callback = head; in queue_balance_callback()
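
queue_balance_callback() chains work onto rq->balance_callback while rq->lock is held, to be run by the scheduler core once the current scheduling operation has finished. A sketch, assuming the middle parameter (not captured by this listing) is a struct callback_head as in the kernel proper; the per-CPU head and both function names are hypothetical:

static DEFINE_PER_CPU(struct callback_head, example_head);

static void example_balance_fn(struct rq *rq)
{
        /* runs later, from the scheduler's balance-callback pass */
}

static void queue_example_work(struct rq *rq)
{
        /* caller holds rq->lock; queueing an already-queued head is a no-op */
        queue_balance_callback(rq, &per_cpu(example_head, cpu_of(rq)),
                               example_balance_fn);
}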
1474 extern int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
1480 static inline int newidle_balance(struct rq *this_rq, struct rq_flags *rf) { return 0; } in newidle_balance()
1636 static inline int task_current(struct rq *rq, struct task_struct *p) in task_current() argument
1638 return rq->curr == p; in task_current()
1641 static inline int task_running(struct rq *rq, struct task_struct *p) in task_running() argument
1646 return task_current(rq, p); in task_running()
1728 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1729 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1730 void (*yield_task) (struct rq *rq);
1731 bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);
1733 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
1746 struct task_struct * (*pick_next_task)(struct rq *rq,
1749 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
1750 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
1753 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
1757 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
1762 void (*rq_online)(struct rq *rq);
1763 void (*rq_offline)(struct rq *rq);
1766 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
1775 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
1776 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1777 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1780 unsigned int (*get_rr_interval)(struct rq *rq,
1783 void (*update_curr)(struct rq *rq);
1793 static inline void put_prev_task(struct rq *rq, struct task_struct *prev) in put_prev_task() argument
1795 WARN_ON_ONCE(rq->curr != prev); in put_prev_task()
1796 prev->sched_class->put_prev_task(rq, prev); in put_prev_task()
1799 static inline void set_next_task(struct rq *rq, struct task_struct *next) in set_next_task() argument
1801 WARN_ON_ONCE(rq->curr != next); in set_next_task()
1802 next->sched_class->set_next_task(rq, next, false); in set_next_task()
1823 static inline bool sched_stop_runnable(struct rq *rq) in sched_stop_runnable() argument
1825 return rq->stop && task_on_rq_queued(rq->stop); in sched_stop_runnable()
1828 static inline bool sched_dl_runnable(struct rq *rq) in sched_dl_runnable() argument
1830 return rq->dl.dl_nr_running > 0; in sched_dl_runnable()
1833 static inline bool sched_rt_runnable(struct rq *rq) in sched_rt_runnable() argument
1835 return rq->rt.rt_queued > 0; in sched_rt_runnable()
1838 static inline bool sched_fair_runnable(struct rq *rq) in sched_fair_runnable() argument
1840 return rq->cfs.nr_running > 0; in sched_fair_runnable()
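
sched_stop_runnable(), sched_dl_runnable(), sched_rt_runnable() and sched_fair_runnable() test each class's run state directly on the rq. An illustrative combination in class priority order; the helper name is hypothetical, but the or-chain reflects the usual way higher classes are checked before falling back to fair:

static bool higher_class_work_pending(struct rq *rq)
{
        /* stop class, then deadline, then rt: highest class first */
        return sched_stop_runnable(rq) || sched_dl_runnable(rq) ||
               sched_rt_runnable(rq);
}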
1847 extern void trigger_load_balance(struct rq *rq);
1854 static inline void idle_set_state(struct rq *rq, in idle_set_state() argument
1857 rq->idle_state = idle_state; in idle_set_state()
1860 static inline struct cpuidle_state *idle_get_state(struct rq *rq) in idle_get_state() argument
1864 return rq->idle_state; in idle_get_state()
1867 static inline void idle_set_state(struct rq *rq, in idle_set_state() argument
1872 static inline struct cpuidle_state *idle_get_state(struct rq *rq) in idle_get_state() argument
1890 extern void resched_curr(struct rq *rq);
1913 extern bool sched_can_stop_tick(struct rq *rq);
1921 static inline void sched_update_tick_dependency(struct rq *rq) in sched_update_tick_dependency() argument
1928 cpu = cpu_of(rq); in sched_update_tick_dependency()
1933 if (sched_can_stop_tick(rq)) in sched_update_tick_dependency()
1940 static inline void sched_update_tick_dependency(struct rq *rq) { } in sched_update_tick_dependency() argument
1943 static inline void add_nr_running(struct rq *rq, unsigned count) in add_nr_running() argument
1945 unsigned prev_nr = rq->nr_running; in add_nr_running()
1947 rq->nr_running = prev_nr + count; in add_nr_running()
1950 if (prev_nr < 2 && rq->nr_running >= 2) { in add_nr_running()
1951 if (!READ_ONCE(rq->rd->overload)) in add_nr_running()
1952 WRITE_ONCE(rq->rd->overload, 1); in add_nr_running()
1956 sched_update_tick_dependency(rq); in add_nr_running()
1959 static inline void sub_nr_running(struct rq *rq, unsigned count) in sub_nr_running() argument
1961 rq->nr_running -= count; in sub_nr_running()
1963 sched_update_tick_dependency(rq); in sub_nr_running()
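
add_nr_running()/sub_nr_running() keep rq->nr_running, the root-domain overload flag and the nohz-full tick dependency in sync. A thin sketch of the call a class's enqueue path typically makes; example_enqueue() is hypothetical:

static void example_enqueue(struct rq *rq, struct task_struct *p, int flags)
{
        /* ... class-specific bookkeeping for p ... */
        add_nr_running(rq, 1);
}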
1966 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1967 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1969 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1981 static inline int hrtick_enabled(struct rq *rq) in hrtick_enabled() argument
1985 if (!cpu_active(cpu_of(rq))) in hrtick_enabled()
1987 return hrtimer_is_hres_active(&rq->hrtick_timer); in hrtick_enabled()
1990 void hrtick_start(struct rq *rq, u64 delay);
1994 static inline int hrtick_enabled(struct rq *rq) in hrtick_enabled() argument
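
hrtick_enabled() guards hrtick_start(): the high-resolution preemption tick is only armed when it is active. A small sketch; example_arm_hrtick() is hypothetical and the delay is in nanoseconds:

static void example_arm_hrtick(struct rq *rq, u64 delay_ns)
{
        if (hrtick_enabled(rq))
                hrtick_start(rq, delay_ns);
}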
2021 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
2031 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance()
2050 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance()
2076 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) in double_lock_balance()
2087 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) in double_unlock_balance()
2127 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) in double_rq_lock()
2152 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) in double_rq_unlock()
2163 extern void set_rq_online (struct rq *rq);
2164 extern void set_rq_offline(struct rq *rq);
2175 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) in double_rq_lock()
2191 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) in double_rq_unlock()
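
double_lock_balance() acquires a second runqueue's lock while this_rq->lock is already held, possibly dropping and re-taking this_rq->lock to respect lock ordering, and returns nonzero when that happened; double_unlock_balance() releases only the second lock. A sketch; example_pull() is hypothetical:

static void example_pull(struct rq *this_rq, struct rq *busiest)
{
        lockdep_assert_held(&this_rq->lock);

        if (double_lock_balance(this_rq, busiest)) {
                /* this_rq->lock was dropped and re-acquired: rq state may have changed */
        }
        /* both locks are held here */
        double_unlock_balance(this_rq, busiest);
}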
2241 extern void nohz_balance_exit_idle(struct rq *rq);
2243 static inline void nohz_balance_exit_idle(struct rq *rq) { } in nohz_balance_exit_idle() argument
2257 struct rq *rq = cpu_rq(i); in __dl_update() local
2259 rq->dl.extra_bw += bw; in __dl_update()
2328 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) in cpufreq_update_util() argument
2333 cpu_of(rq))); in cpufreq_update_util()
2335 data->func(data, rq_clock(rq), flags); in cpufreq_update_util()
2338 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} in cpufreq_update_util() argument
2362 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, in uclamp_rq_util_with() argument
2371 min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value); in uclamp_rq_util_with()
2372 max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); in uclamp_rq_util_with()
2409 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, in uclamp_rq_util_with() argument
2480 static inline unsigned long cpu_bw_dl(struct rq *rq) in cpu_bw_dl() argument
2482 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; in cpu_bw_dl()
2485 static inline unsigned long cpu_util_dl(struct rq *rq) in cpu_util_dl() argument
2487 return READ_ONCE(rq->avg_dl.util_avg); in cpu_util_dl()
2490 static inline unsigned long cpu_util_cfs(struct rq *rq) in cpu_util_cfs() argument
2492 unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); in cpu_util_cfs()
2496 READ_ONCE(rq->cfs.avg.util_est.enqueued)); in cpu_util_cfs()
2502 static inline unsigned long cpu_util_rt(struct rq *rq) in cpu_util_rt() argument
2504 return READ_ONCE(rq->avg_rt.util_avg); in cpu_util_rt()
2516 static inline unsigned long cpu_util_irq(struct rq *rq) in cpu_util_irq() argument
2518 return rq->avg_irq.util_avg; in cpu_util_irq()
2531 static inline unsigned long cpu_util_irq(struct rq *rq) in cpu_util_irq() argument
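
cpu_bw_dl(), cpu_util_dl(), cpu_util_cfs(), cpu_util_rt() and cpu_util_irq() expose per-class bandwidth and PELT utilization, while uclamp_rq_util_with() (line 2362 above) clamps a utilization value to the runqueue's UCLAMP_MIN/UCLAMP_MAX window. A sketch combining the two; the trailing task parameter of uclamp_rq_util_with() is not part of this listing, passing NULL for it (no task-specific clamp) is an assumption here, and example_cpu_util() is a hypothetical name:

static unsigned long example_cpu_util(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        /* CFS utilization of this CPU, clamped to the rq's uclamp window */
        return uclamp_rq_util_with(rq, cpu_util_cfs(rq), NULL);
}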
2568 static inline void membarrier_switch_mm(struct rq *rq, in membarrier_switch_mm() argument
2578 if (READ_ONCE(rq->membarrier_state) == membarrier_state) in membarrier_switch_mm()
2581 WRITE_ONCE(rq->membarrier_state, membarrier_state); in membarrier_switch_mm()
2584 static inline void membarrier_switch_mm(struct rq *rq, in membarrier_switch_mm() argument