Lines Matching refs:rq
103 struct rq;
117 extern void calc_global_load_tick(struct rq *this_rq);
118 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
120 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
628 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ member
704 struct rq *rq; member
906 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
913 extern struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu);
960 struct rq;
963 void (*func)(struct rq *rq);
973 struct rq { struct
1167 struct rq *core; argument
1195 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() argument
1197 return cfs_rq->rq; in rq_of()
1202 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of()
1204 return container_of(cfs_rq, struct rq, cfs); in rq_of()
1208 static inline int cpu_of(struct rq *rq) in cpu_of() argument
1211 return rq->cpu; in cpu_of()
1228 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1242 static inline bool sched_core_enabled(struct rq *rq) in sched_core_enabled() argument
1244 return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled; in sched_core_enabled()
1256 static inline raw_spinlock_t *rq_lockp(struct rq *rq) in rq_lockp() argument
1258 if (sched_core_enabled(rq)) in rq_lockp()
1259 return &rq->core->__lock; in rq_lockp()
1261 return &rq->__lock; in rq_lockp()
1264 static inline raw_spinlock_t *__rq_lockp(struct rq *rq) in __rq_lockp() argument
1266 if (rq->core_enabled) in __rq_lockp()
1267 return &rq->core->__lock; in __rq_lockp()
1269 return &rq->__lock; in __rq_lockp()
1280 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) in sched_cpu_cookie_match() argument
1283 if (!sched_core_enabled(rq)) in sched_cpu_cookie_match()
1286 return rq->core->core_cookie == p->core_cookie; in sched_cpu_cookie_match()
1289 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) in sched_core_cookie_match() argument
1295 if (!sched_core_enabled(rq)) in sched_core_cookie_match()
1298 for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) { in sched_core_cookie_match()
1309 return idle_core || rq->core->core_cookie == p->core_cookie; in sched_core_cookie_match()
1312 static inline bool sched_group_cookie_match(struct rq *rq, in sched_group_cookie_match() argument
1319 if (!sched_core_enabled(rq)) in sched_group_cookie_match()
1334 extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
1335 extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags);
1342 static inline bool sched_core_enabled(struct rq *rq) in sched_core_enabled() argument
1352 static inline raw_spinlock_t *rq_lockp(struct rq *rq) in rq_lockp() argument
1354 return &rq->__lock; in rq_lockp()
1357 static inline raw_spinlock_t *__rq_lockp(struct rq *rq) in __rq_lockp() argument
1359 return &rq->__lock; in __rq_lockp()
1362 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) in sched_cpu_cookie_match() argument
1367 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) in sched_core_cookie_match() argument
1372 static inline bool sched_group_cookie_match(struct rq *rq, in sched_group_cookie_match() argument
1380 static inline void lockdep_assert_rq_held(struct rq *rq) in lockdep_assert_rq_held() argument
1382 lockdep_assert_held(__rq_lockp(rq)); in lockdep_assert_rq_held()
1385 extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
1386 extern bool raw_spin_rq_trylock(struct rq *rq);
1387 extern void raw_spin_rq_unlock(struct rq *rq);
1389 static inline void raw_spin_rq_lock(struct rq *rq) in raw_spin_rq_lock() argument
1391 raw_spin_rq_lock_nested(rq, 0); in raw_spin_rq_lock()
1394 static inline void raw_spin_rq_lock_irq(struct rq *rq) in raw_spin_rq_lock_irq() argument
1397 raw_spin_rq_lock(rq); in raw_spin_rq_lock_irq()
1400 static inline void raw_spin_rq_unlock_irq(struct rq *rq) in raw_spin_rq_unlock_irq() argument
1402 raw_spin_rq_unlock(rq); in raw_spin_rq_unlock_irq()
1406 static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq) in _raw_spin_rq_lock_irqsave() argument
1410 raw_spin_rq_lock(rq); in _raw_spin_rq_lock_irqsave()
1414 static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags) in raw_spin_rq_unlock_irqrestore() argument
1416 raw_spin_rq_unlock(rq); in raw_spin_rq_unlock_irqrestore()
1420 #define raw_spin_rq_lock_irqsave(rq, flags) \ argument
1422 flags = _raw_spin_rq_lock_irqsave(rq); \
1426 extern void __update_idle_core(struct rq *rq);
1428 static inline void update_idle_core(struct rq *rq) in update_idle_core() argument
1431 __update_idle_core(rq); in update_idle_core()
1435 static inline void update_idle_core(struct rq *rq) { } in update_idle_core() argument
1477 struct rq *rq = task_rq(p); in cfs_rq_of() local
1479 return &rq->cfs; in cfs_rq_of()
1489 extern void update_rq_clock(struct rq *rq);
1518 static inline void assert_clock_updated(struct rq *rq) in assert_clock_updated() argument
1524 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); in assert_clock_updated()
1527 static inline u64 rq_clock(struct rq *rq) in rq_clock() argument
1529 lockdep_assert_rq_held(rq); in rq_clock()
1530 assert_clock_updated(rq); in rq_clock()
1532 return rq->clock; in rq_clock()
1535 static inline u64 rq_clock_task(struct rq *rq) in rq_clock_task() argument
1537 lockdep_assert_rq_held(rq); in rq_clock_task()
1538 assert_clock_updated(rq); in rq_clock_task()
1540 return rq->clock_task; in rq_clock_task()
1556 static inline u64 rq_clock_thermal(struct rq *rq) in rq_clock_thermal() argument
1558 return rq_clock_task(rq) >> sched_thermal_decay_shift; in rq_clock_thermal()
1561 static inline void rq_clock_skip_update(struct rq *rq) in rq_clock_skip_update() argument
1563 lockdep_assert_rq_held(rq); in rq_clock_skip_update()
1564 rq->clock_update_flags |= RQCF_REQ_SKIP; in rq_clock_skip_update()
1571 static inline void rq_clock_cancel_skipupdate(struct rq *rq) in rq_clock_cancel_skipupdate() argument
1573 lockdep_assert_rq_held(rq); in rq_clock_cancel_skipupdate()
1574 rq->clock_update_flags &= ~RQCF_REQ_SKIP; in rq_clock_cancel_skipupdate()
1591 extern struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
1607 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) in rq_pin_lock() argument
1609 rf->cookie = lockdep_pin_lock(__rq_lockp(rq)); in rq_pin_lock()
1612 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); in rq_pin_lock()
1615 SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback); in rq_pin_lock()
1620 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) in rq_unpin_lock() argument
1623 if (rq->clock_update_flags > RQCF_ACT_SKIP) in rq_unpin_lock()
1627 lockdep_unpin_lock(__rq_lockp(rq), rf->cookie); in rq_unpin_lock()
1630 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) in rq_repin_lock() argument
1632 lockdep_repin_lock(__rq_lockp(rq), rf->cookie); in rq_repin_lock()
1638 rq->clock_update_flags |= rf->clock_update_flags; in rq_repin_lock()
1642 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1643 __acquires(rq->lock);
1645 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1647 __acquires(rq->lock);
1649 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) in __task_rq_unlock() argument
1650 __releases(rq->lock) in __task_rq_unlock()
1652 rq_unpin_lock(rq, rf); in __task_rq_unlock()
1653 raw_spin_rq_unlock(rq); in __task_rq_unlock()
1657 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in task_rq_unlock() argument
1658 __releases(rq->lock) in task_rq_unlock()
1661 rq_unpin_lock(rq, rf); in task_rq_unlock()
1662 raw_spin_rq_unlock(rq); in task_rq_unlock()
1667 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) in rq_lock_irqsave() argument
1668 __acquires(rq->lock) in rq_lock_irqsave()
1670 raw_spin_rq_lock_irqsave(rq, rf->flags); in rq_lock_irqsave()
1671 rq_pin_lock(rq, rf); in rq_lock_irqsave()
1675 rq_lock_irq(struct rq *rq, struct rq_flags *rf) in rq_lock_irq() argument
1676 __acquires(rq->lock) in rq_lock_irq()
1678 raw_spin_rq_lock_irq(rq); in rq_lock_irq()
1679 rq_pin_lock(rq, rf); in rq_lock_irq()
1683 rq_lock(struct rq *rq, struct rq_flags *rf) in rq_lock() argument
1684 __acquires(rq->lock) in rq_lock()
1686 raw_spin_rq_lock(rq); in rq_lock()
1687 rq_pin_lock(rq, rf); in rq_lock()
1691 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) in rq_unlock_irqrestore() argument
1692 __releases(rq->lock) in rq_unlock_irqrestore()
1694 rq_unpin_lock(rq, rf); in rq_unlock_irqrestore()
1695 raw_spin_rq_unlock_irqrestore(rq, rf->flags); in rq_unlock_irqrestore()
1699 rq_unlock_irq(struct rq *rq, struct rq_flags *rf) in rq_unlock_irq() argument
1700 __releases(rq->lock) in rq_unlock_irq()
1702 rq_unpin_lock(rq, rf); in rq_unlock_irq()
1703 raw_spin_rq_unlock_irq(rq); in rq_unlock_irq()
1707 rq_unlock(struct rq *rq, struct rq_flags *rf) in rq_unlock() argument
1708 __releases(rq->lock) in rq_unlock()
1710 rq_unpin_lock(rq, rf); in rq_unlock()
1711 raw_spin_rq_unlock(rq); in rq_unlock()
1714 static inline struct rq *
1716 __acquires(rq->lock) in this_rq_lock_irq()
1718 struct rq *rq; in this_rq_lock_irq() local
1721 rq = this_rq(); in this_rq_lock_irq()
1722 rq_lock(rq, rf); in this_rq_lock_irq()
1723 return rq; in this_rq_lock_irq()
1775 queue_balance_callback(struct rq *rq, in queue_balance_callback() argument
1777 void (*func)(struct rq *rq)) in queue_balance_callback() argument
1779 lockdep_assert_rq_held(rq); in queue_balance_callback()
1786 if (unlikely(head->next || rq->balance_callback == &balance_push_callback)) in queue_balance_callback()
1790 head->next = rq->balance_callback; in queue_balance_callback()
1791 rq->balance_callback = head; in queue_balance_callback()
1929 extern void __sched_core_account_forceidle(struct rq *rq);
1931 static inline void sched_core_account_forceidle(struct rq *rq) in sched_core_account_forceidle() argument
1934 __sched_core_account_forceidle(rq); in sched_core_account_forceidle()
1937 extern void __sched_core_tick(struct rq *rq);
1939 static inline void sched_core_tick(struct rq *rq) in sched_core_tick() argument
1941 if (sched_core_enabled(rq) && schedstat_enabled()) in sched_core_tick()
1942 __sched_core_tick(rq); in sched_core_tick()
1947 static inline void sched_core_account_forceidle(struct rq *rq) {} in sched_core_account_forceidle() argument
1949 static inline void sched_core_tick(struct rq *rq) {} in sched_core_tick() argument
2100 static inline int task_current(struct rq *rq, struct task_struct *p) in task_current() argument
2102 return rq->curr == p; in task_current()
2105 static inline int task_on_cpu(struct rq *rq, struct task_struct *p) in task_on_cpu() argument
2110 return task_current(rq, p); in task_on_cpu()
2202 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
2203 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
2204 void (*yield_task) (struct rq *rq);
2205 bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
2207 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
2209 struct task_struct *(*pick_next_task)(struct rq *rq);
2211 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
2212 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
2215 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
2218 struct task_struct * (*pick_task)(struct rq *rq);
2222 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
2228 void (*rq_online)(struct rq *rq);
2229 void (*rq_offline)(struct rq *rq);
2231 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
2234 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
2243 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
2244 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
2245 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
2248 unsigned int (*get_rr_interval)(struct rq *rq,
2251 void (*update_curr)(struct rq *rq);
2258 static inline void put_prev_task(struct rq *rq, struct task_struct *prev) in put_prev_task() argument
2260 WARN_ON_ONCE(rq->curr != prev); in put_prev_task()
2261 prev->sched_class->put_prev_task(rq, prev); in put_prev_task()
2264 static inline void set_next_task(struct rq *rq, struct task_struct *next) in set_next_task() argument
2266 next->sched_class->set_next_task(rq, next, false); in set_next_task()
2303 static inline bool sched_stop_runnable(struct rq *rq) in sched_stop_runnable() argument
2305 return rq->stop && task_on_rq_queued(rq->stop); in sched_stop_runnable()
2308 static inline bool sched_dl_runnable(struct rq *rq) in sched_dl_runnable() argument
2310 return rq->dl.dl_nr_running > 0; in sched_dl_runnable()
2313 static inline bool sched_rt_runnable(struct rq *rq) in sched_rt_runnable() argument
2315 return rq->rt.rt_queued > 0; in sched_rt_runnable()
2318 static inline bool sched_fair_runnable(struct rq *rq) in sched_fair_runnable() argument
2320 return rq->cfs.nr_running > 0; in sched_fair_runnable()
2323 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_f…
2324 extern struct task_struct *pick_next_task_idle(struct rq *rq);
2335 extern void trigger_load_balance(struct rq *rq);
2339 static inline struct task_struct *get_push_task(struct rq *rq) in get_push_task() argument
2341 struct task_struct *p = rq->curr; in get_push_task()
2343 lockdep_assert_rq_held(rq); in get_push_task()
2345 if (rq->push_busy) in get_push_task()
2354 rq->push_busy = true; in get_push_task()
2364 static inline void idle_set_state(struct rq *rq, in idle_set_state() argument
2367 rq->idle_state = idle_state; in idle_set_state()
2370 static inline struct cpuidle_state *idle_get_state(struct rq *rq) in idle_get_state() argument
2374 return rq->idle_state; in idle_get_state()
2377 static inline void idle_set_state(struct rq *rq, in idle_set_state() argument
2382 static inline struct cpuidle_state *idle_get_state(struct rq *rq) in idle_get_state() argument
2400 extern void resched_curr(struct rq *rq);
2422 extern bool sched_can_stop_tick(struct rq *rq);
2430 static inline void sched_update_tick_dependency(struct rq *rq) in sched_update_tick_dependency() argument
2432 int cpu = cpu_of(rq); in sched_update_tick_dependency()
2437 if (sched_can_stop_tick(rq)) in sched_update_tick_dependency()
2444 static inline void sched_update_tick_dependency(struct rq *rq) { } in sched_update_tick_dependency() argument
2447 static inline void add_nr_running(struct rq *rq, unsigned count) in add_nr_running() argument
2449 unsigned prev_nr = rq->nr_running; in add_nr_running()
2451 rq->nr_running = prev_nr + count; in add_nr_running()
2453 call_trace_sched_update_nr_running(rq, count); in add_nr_running()
2457 if (prev_nr < 2 && rq->nr_running >= 2) { in add_nr_running()
2458 if (!READ_ONCE(rq->rd->overload)) in add_nr_running()
2459 WRITE_ONCE(rq->rd->overload, 1); in add_nr_running()
2463 sched_update_tick_dependency(rq); in add_nr_running()
2466 static inline void sub_nr_running(struct rq *rq, unsigned count) in sub_nr_running() argument
2468 rq->nr_running -= count; in sub_nr_running()
2470 call_trace_sched_update_nr_running(rq, -count); in sub_nr_running()
2474 sched_update_tick_dependency(rq); in sub_nr_running()
2477 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
2478 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
2480 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
2515 static inline int hrtick_enabled(struct rq *rq) in hrtick_enabled() argument
2517 if (!cpu_active(cpu_of(rq))) in hrtick_enabled()
2519 return hrtimer_is_hres_active(&rq->hrtick_timer); in hrtick_enabled()
2522 static inline int hrtick_enabled_fair(struct rq *rq) in hrtick_enabled_fair() argument
2526 return hrtick_enabled(rq); in hrtick_enabled_fair()
2529 static inline int hrtick_enabled_dl(struct rq *rq) in hrtick_enabled_dl() argument
2533 return hrtick_enabled(rq); in hrtick_enabled_dl()
2536 void hrtick_start(struct rq *rq, u64 delay);
2540 static inline int hrtick_enabled_fair(struct rq *rq) in hrtick_enabled_fair() argument
2545 static inline int hrtick_enabled_dl(struct rq *rq) in hrtick_enabled_dl() argument
2550 static inline int hrtick_enabled(struct rq *rq) in hrtick_enabled() argument
2589 static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) in double_rq_clock_clear_update()
2598 static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) {} in double_rq_clock_clear_update()
2603 static inline bool rq_order_less(struct rq *rq1, struct rq *rq2) in rq_order_less()
2629 extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
2641 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance()
2660 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance()
2688 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) in double_lock_balance()
2695 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) in double_unlock_balance()
2736 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) in double_rq_unlock()
2747 extern void set_rq_online (struct rq *rq);
2748 extern void set_rq_offline(struct rq *rq);
2759 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) in double_rq_lock()
2776 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) in double_rq_unlock()
2838 extern void nohz_balance_exit_idle(struct rq *rq);
2840 static inline void nohz_balance_exit_idle(struct rq *rq) { } in nohz_balance_exit_idle() argument
2904 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) in cpufreq_update_util() argument
2909 cpu_of(rq))); in cpufreq_update_util()
2911 data->func(data, rq_clock(rq), flags); in cpufreq_update_util()
2914 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} in cpufreq_update_util() argument
2965 static inline unsigned long cpu_bw_dl(struct rq *rq) in cpu_bw_dl() argument
2967 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; in cpu_bw_dl()
2970 static inline unsigned long cpu_util_dl(struct rq *rq) in cpu_util_dl() argument
2972 return READ_ONCE(rq->avg_dl.util_avg); in cpu_util_dl()
3023 static inline unsigned long cpu_util_rt(struct rq *rq) in cpu_util_rt() argument
3025 return READ_ONCE(rq->avg_rt.util_avg); in cpu_util_rt()
3032 static inline unsigned long uclamp_rq_get(struct rq *rq, in uclamp_rq_get() argument
3035 return READ_ONCE(rq->uclamp[clamp_id].value); in uclamp_rq_get()
3038 static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, in uclamp_rq_set() argument
3041 WRITE_ONCE(rq->uclamp[clamp_id].value, value); in uclamp_rq_set()
3044 static inline bool uclamp_rq_is_idle(struct rq *rq) in uclamp_rq_is_idle() argument
3046 return rq->uclamp_flags & UCLAMP_FLAG_IDLE; in uclamp_rq_is_idle()
3067 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, in uclamp_rq_util_with() argument
3084 if (uclamp_rq_is_idle(rq)) in uclamp_rq_util_with()
3088 min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN)); in uclamp_rq_util_with()
3089 max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX)); in uclamp_rq_util_with()
3103 static inline bool uclamp_rq_is_capped(struct rq *rq) in uclamp_rq_is_capped() argument
3111 rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq); in uclamp_rq_is_capped()
3112 max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); in uclamp_rq_is_capped()
3140 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, in uclamp_rq_util_with() argument
3146 static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; } in uclamp_rq_is_capped() argument
3153 static inline unsigned long uclamp_rq_get(struct rq *rq, in uclamp_rq_get() argument
3162 static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, in uclamp_rq_set() argument
3167 static inline bool uclamp_rq_is_idle(struct rq *rq) in uclamp_rq_is_idle() argument
3174 static inline unsigned long cpu_util_irq(struct rq *rq) in cpu_util_irq() argument
3176 return rq->avg_irq.util_avg; in cpu_util_irq()
3189 static inline unsigned long cpu_util_irq(struct rq *rq) in cpu_util_irq() argument
3226 static inline void membarrier_switch_mm(struct rq *rq, in membarrier_switch_mm() argument
3236 if (READ_ONCE(rq->membarrier_state) == membarrier_state) in membarrier_switch_mm()
3239 WRITE_ONCE(rq->membarrier_state, membarrier_state); in membarrier_switch_mm()
3242 static inline void membarrier_switch_mm(struct rq *rq, in membarrier_switch_mm() argument
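Usage note (not part of the listing above): a minimal sketch of how the runqueue lock/pin helpers matched here are typically combined. The wrapper example_poke_cpu() and its body are hypothetical illustrations; the helpers it calls (cpu_rq(), rq_lock_irqsave(), update_rq_clock(), resched_curr(), rq_unlock_irqrestore()) are the ones declared in kernel/sched/sched.h and listed above.

	/* Hypothetical example: lock a CPU's runqueue and ask its current task
	 * to reschedule if other tasks are waiting. */
	static void example_poke_cpu(int cpu)
	{
		struct rq_flags rf;
		struct rq *rq = cpu_rq(cpu);		/* per-CPU runqueue from sched.h */

		rq_lock_irqsave(rq, &rf);		/* acquire + lockdep-pin, IRQs saved in rf */
		update_rq_clock(rq);			/* refresh rq->clock before using it */
		if (rq->nr_running > 1)
			resched_curr(rq);		/* set need_resched on the current task */
		rq_unlock_irqrestore(rq, &rf);		/* unpin + release, IRQs restored */
	}

The rq_lock*()/rq_unlock*() wrappers pair the raw_spin_rq_*() lock operations with rq_pin_lock()/rq_unpin_lock(), which is why callers pass a struct rq_flags cookie rather than locking rq->__lock directly.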