Lines matching refs: rq (runqueue pointer references in the RT scheduling class, kernel/sched/rt.c)
181 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
183 return rt_rq->rq; in rq_of_rt_rq()
191 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
195 return rt_rq->rq; in rq_of_rt_se()
224 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local
228 rt_rq->rq = rq; in init_tg_rt_entry()
238 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
296 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
298 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
301 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
310 struct rq *rq = rq_of_rt_se(rt_se); in rt_rq_of_se() local
312 return &rq->rt; in rt_rq_of_se()
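
The two rq_of_rt_rq()/rq_of_rt_se() variants above reflect the two build configurations: with RT group scheduling the rt_rq carries an explicit back-pointer (rt_rq->rq), while without it the single rt_rq is embedded in each rq and container_of() recovers the owner from the member's address. A minimal userspace sketch of the embedded case, with the structs trimmed to the fields the lookup needs (anything beyond rq/rt/cpu is illustrative, not the kernel layout):

#include <stddef.h>
#include <stdio.h>

/* Same trick as the kernel macro: subtract the member's offset. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rt_rq {
        unsigned int rt_nr_running;
};

struct rq {
        int cpu;
        struct rt_rq rt;        /* embedded, as in the !RT_GROUP_SCHED case */
};

static struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

int main(void)
{
        struct rq rq = { .cpu = 2 };

        /* Recover &rq from a pointer to its embedded RT runqueue. */
        printf("cpu = %d\n", rq_of_rt_rq(&rq.rt)->cpu);         /* prints 2 */
        return 0;
}
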
327 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev) in need_pull_rt_task() argument
330 return rq->online && rq->rt.highest_prio.curr > prev->prio; in need_pull_rt_task()
333 static inline int rt_overloaded(struct rq *rq) in rt_overloaded() argument
335 return atomic_read(&rq->rd->rto_count); in rt_overloaded()
338 static inline void rt_set_overload(struct rq *rq) in rt_set_overload() argument
340 if (!rq->online) in rt_set_overload()
343 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask); in rt_set_overload()
354 atomic_inc(&rq->rd->rto_count); in rt_set_overload()
357 static inline void rt_clear_overload(struct rq *rq) in rt_clear_overload() argument
359 if (!rq->online) in rt_clear_overload()
363 atomic_dec(&rq->rd->rto_count); in rt_clear_overload()
364 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); in rt_clear_overload()
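
rt_set_overload()/rt_clear_overload() keep a root-domain-wide picture of which CPUs have more runnable RT tasks than they can execute: a CPU mask plus an atomic count that rt_overloaded() can test cheaply. A simplified single-file model of that bookkeeping, assuming C11 atomics and dropping the kernel's exact barrier and locking rules (the struct layouts are trimmed for illustration):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

struct root_domain {
        atomic_int rto_count;           /* how many CPUs are RT-overloaded */
        atomic_bool rto_mask[NR_CPUS];  /* which CPUs are RT-overloaded    */
};

struct rq {
        int cpu;
        bool online;
        struct root_domain *rd;
};

static void rt_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        /* Publish the mask bit before the count, mirroring the kernel's
         * "mask first" ordering so a reader that sees the count also
         * finds the bit. */
        atomic_store(&rq->rd->rto_mask[rq->cpu], true);
        atomic_fetch_add(&rq->rd->rto_count, 1);
}

static void rt_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        /* Drop the count, then the mask bit. */
        atomic_fetch_sub(&rq->rd->rto_count, 1);
        atomic_store(&rq->rd->rto_mask[rq->cpu], false);
}

static int rt_overloaded(struct rq *rq)
{
        return atomic_load(&rq->rd->rto_count);
}

int main(void)
{
        struct root_domain rd = { 0 };
        struct rq rq = { .cpu = 3, .online = true, .rd = &rd };

        rt_set_overload(&rq);
        printf("overloaded CPUs: %d\n", rt_overloaded(&rq));    /* 1 */
        rt_clear_overload(&rq);
        printf("overloaded CPUs: %d\n", rt_overloaded(&rq));    /* 0 */
        return 0;
}
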
414 static inline int has_pushable_tasks(struct rq *rq) in has_pushable_tasks() argument
416 return !plist_head_empty(&rq->rt.pushable_tasks); in has_pushable_tasks()
422 static void push_rt_tasks(struct rq *);
423 static void pull_rt_task(struct rq *);
425 static inline void rt_queue_push_tasks(struct rq *rq) in rt_queue_push_tasks() argument
427 if (!has_pushable_tasks(rq)) in rt_queue_push_tasks()
430 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks); in rt_queue_push_tasks()
433 static inline void rt_queue_pull_task(struct rq *rq) in rt_queue_pull_task() argument
435 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task); in rt_queue_pull_task()
438 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_task() argument
440 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); in enqueue_pushable_task()
442 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks); in enqueue_pushable_task()
445 if (p->prio < rq->rt.highest_prio.next) in enqueue_pushable_task()
446 rq->rt.highest_prio.next = p->prio; in enqueue_pushable_task()
449 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_task() argument
451 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); in dequeue_pushable_task()
454 if (has_pushable_tasks(rq)) { in dequeue_pushable_task()
455 p = plist_first_entry(&rq->rt.pushable_tasks, in dequeue_pushable_task()
457 rq->rt.highest_prio.next = p->prio; in dequeue_pushable_task()
459 rq->rt.highest_prio.next = MAX_RT_PRIO-1; in dequeue_pushable_task()
465 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_task() argument
469 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_task() argument
483 static inline void rt_queue_push_tasks(struct rq *rq) in rt_queue_push_tasks() argument
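
The enqueue_pushable_task()/dequeue_pushable_task() hits above maintain a priority-ordered list of queued RT tasks that could run elsewhere, plus a cached highest_prio.next so the next-best pushable priority is known without walking the list. A userspace sketch of that bookkeeping, using a plain sorted singly linked list in place of the kernel's plist (struct and field names are illustrative):

#include <stdio.h>

#define MAX_RT_PRIO 100

struct task {
        int prio;               /* lower value = more urgent, as in the kernel */
        struct task *next;
};

struct rt_rq {
        struct task *pushable_tasks;    /* sorted, lowest prio value first */
        int highest_prio_next;          /* prio of the best pushable task  */
};

static int has_pushable_tasks(struct rt_rq *rt_rq)
{
        return rt_rq->pushable_tasks != NULL;
}

static void enqueue_pushable_task(struct rt_rq *rt_rq, struct task *p)
{
        struct task **pp = &rt_rq->pushable_tasks;

        while (*pp && (*pp)->prio <= p->prio)   /* keep the list sorted */
                pp = &(*pp)->next;
        p->next = *pp;
        *pp = p;

        if (p->prio < rt_rq->highest_prio_next)
                rt_rq->highest_prio_next = p->prio;
}

static void dequeue_pushable_task(struct rt_rq *rt_rq, struct task *p)
{
        struct task **pp = &rt_rq->pushable_tasks;

        while (*pp && *pp != p)
                pp = &(*pp)->next;
        if (*pp)
                *pp = p->next;

        /* Refresh the cached next-highest priority from the new head. */
        if (has_pushable_tasks(rt_rq))
                rt_rq->highest_prio_next = rt_rq->pushable_tasks->prio;
        else
                rt_rq->highest_prio_next = MAX_RT_PRIO - 1;
}

int main(void)
{
        struct rt_rq rt_rq = { NULL, MAX_RT_PRIO - 1 };
        struct task a = { .prio = 10 }, b = { .prio = 5 };

        enqueue_pushable_task(&rt_rq, &a);
        enqueue_pushable_task(&rt_rq, &b);
        printf("next pushable prio: %d\n", rt_rq.highest_prio_next);    /* 5 */
        dequeue_pushable_task(&rt_rq, &b);
        printf("next pushable prio: %d\n", rt_rq.highest_prio_next);    /* 10 */
        return 0;
}
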
565 #define for_each_rt_rq(rt_rq, iter, rq) \ argument
568 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
584 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue() local
587 int cpu = cpu_of(rq); in sched_rt_rq_enqueue()
598 resched_curr(rq); in sched_rt_rq_enqueue()
672 #define for_each_rt_rq(rt_rq, iter, rq) \ argument
673 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
685 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue() local
691 resched_curr(rq); in sched_rt_rq_enqueue()
786 static void __disable_runtime(struct rq *rq) in __disable_runtime() argument
788 struct root_domain *rd = rq->rd; in __disable_runtime()
795 for_each_rt_rq(rt_rq, iter, rq) { in __disable_runtime()
868 static void __enable_runtime(struct rq *rq) in __enable_runtime() argument
879 for_each_rt_rq(rt_rq, iter, rq) { in __enable_runtime()
929 struct rq *rq = rq_of_rt_rq(rt_rq); in do_sched_rt_period_timer() local
945 rq_lock(rq, &rf); in do_sched_rt_period_timer()
946 update_rq_clock(rq); in do_sched_rt_period_timer()
967 if (rt_rq->rt_nr_running && rq->curr == rq->idle) in do_sched_rt_period_timer()
968 rq_clock_cancel_skipupdate(rq); in do_sched_rt_period_timer()
983 rq_unlock(rq, &rf); in do_sched_rt_period_timer()
1058 static void update_curr_rt(struct rq *rq) in update_curr_rt() argument
1060 struct task_struct *curr = rq->curr; in update_curr_rt()
1068 now = rq_clock_task(rq); in update_curr_rt()
1094 resched_curr(rq); in update_curr_rt()
1105 struct rq *rq = rq_of_rt_rq(rt_rq); in dequeue_top_rt_rq() local
1107 BUG_ON(&rq->rt != rt_rq); in dequeue_top_rt_rq()
1112 BUG_ON(!rq->nr_running); in dequeue_top_rt_rq()
1114 sub_nr_running(rq, count); in dequeue_top_rt_rq()
1122 struct rq *rq = rq_of_rt_rq(rt_rq); in enqueue_top_rt_rq() local
1124 BUG_ON(&rq->rt != rt_rq); in enqueue_top_rt_rq()
1133 add_nr_running(rq, rt_rq->rt_nr_running); in enqueue_top_rt_rq()
1138 cpufreq_update_util(rq, 0); in enqueue_top_rt_rq()
1146 struct rq *rq = rq_of_rt_rq(rt_rq); in inc_rt_prio_smp() local
1152 if (&rq->rt != rt_rq) in inc_rt_prio_smp()
1155 if (rq->online && prio < prev_prio) in inc_rt_prio_smp()
1156 cpupri_set(&rq->rd->cpupri, rq->cpu, prio); in inc_rt_prio_smp()
1162 struct rq *rq = rq_of_rt_rq(rt_rq); in dec_rt_prio_smp() local
1168 if (&rq->rt != rt_rq) in dec_rt_prio_smp()
1171 if (rq->online && rt_rq->highest_prio.curr != prev_prio) in dec_rt_prio_smp()
1172 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); in dec_rt_prio_smp()
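
inc_rt_prio_smp()/dec_rt_prio_smp() publish a runqueue's new top RT priority into cpupri, which find_lowest_rq()/cpupri_find() later consult to pick a CPU that would run a given task immediately. The sketch below models only that query; the real cpupri keeps per-priority CPU masks with a compressed priority index, not the flat per-CPU array used here for illustration:

#include <stdio.h>

#define NR_CPUS     4
#define CPUPRI_IDLE 101         /* "nothing urgent running" sentinel */

static int cpu_pri[NR_CPUS];    /* current top RT priority per CPU */

/* Stands in for cpupri_set(&rq->rd->cpupri, rq->cpu, prio). */
static void cpupri_set(int cpu, int prio)
{
        cpu_pri[cpu] = prio;
}

/*
 * Stands in for the intent of cpupri_find(): a CPU whose current priority is
 * lower (numerically higher) than @prio would run the task at once; -1 means
 * every CPU is busy with more urgent work.
 */
static int cpupri_find(int prio)
{
        int cpu, best = -1;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_pri[cpu] > prio &&
                    (best < 0 || cpu_pri[cpu] > cpu_pri[best]))
                        best = cpu;
        return best;
}

int main(void)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                cpupri_set(cpu, CPUPRI_IDLE);

        cpupri_set(0, 10);      /* CPU0 now runs a prio-10 RT task */
        cpupri_set(1, 50);

        printf("prio-30 task -> CPU %d\n", cpupri_find(30));    /* an idle CPU */
        printf("prio-5  task -> CPU %d\n", cpupri_find(5));     /* an idle CPU */
        return 0;
}
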
1519 struct rq *rq = rq_of_rt_se(rt_se); in enqueue_rt_entity() local
1526 enqueue_top_rt_rq(&rq->rt); in enqueue_rt_entity()
1531 struct rq *rq = rq_of_rt_se(rt_se); in dequeue_rt_entity() local
1543 enqueue_top_rt_rq(&rq->rt); in dequeue_rt_entity()
1547 static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p, in should_honor_rt_sync() argument
1555 return sync && task_has_rt_policy(rq->curr) && in should_honor_rt_sync()
1556 p->prio <= rq->rt.highest_prio.next && in should_honor_rt_sync()
1557 rq->rt.rt_nr_running <= 2; in should_honor_rt_sync()
1560 static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p, in should_honor_rt_sync() argument
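
should_honor_rt_sync() appears to be an out-of-tree (Android) addition: on a synchronous wakeup from an RT waker that is about to sleep, a wakee of sufficient priority is kept local instead of being queued as pushable. A standalone sketch of just that predicate, with the rq/task structures reduced to the fields the check reads (names of those reduced structs are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct rt_rq_state {
        int highest_prio_next;          /* best prio among pushable tasks */
        unsigned int nr_running;        /* RT tasks on this runqueue      */
};

struct cpu_state {
        bool curr_is_rt;                /* current task has an RT policy  */
        struct rt_rq_state rt;
};

/* Lower prio value means more urgent, as in the kernel. */
static bool should_honor_rt_sync(struct cpu_state *rq, int wakee_prio, bool sync)
{
        return sync && rq->curr_is_rt &&
               wakee_prio <= rq->rt.highest_prio_next &&
               rq->rt.nr_running <= 2;
}

int main(void)
{
        struct cpu_state rq = {
                .curr_is_rt = true,
                .rt = { .highest_prio_next = 50, .nr_running = 2 },
        };

        /* Sync wakeup of a prio-40 task: keep it local, don't mark pushable. */
        printf("%d\n", should_honor_rt_sync(&rq, 40, true));    /* 1 */
        /* Non-sync wakeup: normal pushable handling applies. */
        printf("%d\n", should_honor_rt_sync(&rq, 40, false));   /* 0 */
        return 0;
}
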
1571 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_rt() argument
1584 if (!task_current(rq, p) && p->nr_cpus_allowed > 1 && in enqueue_task_rt()
1585 !should_honor_rt_sync(rq, p, sync)) in enqueue_task_rt()
1586 enqueue_pushable_task(rq, p); in enqueue_task_rt()
1589 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_rt() argument
1593 update_curr_rt(rq); in dequeue_task_rt()
1596 dequeue_pushable_task(rq, p); in dequeue_task_rt()
1617 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) in requeue_task_rt() argument
1628 static void yield_task_rt(struct rq *rq) in yield_task_rt() argument
1630 requeue_task_rt(rq, rq->curr, 0); in yield_task_rt()
1667 struct rq *rq; in select_task_rq_rt() local
1668 struct rq *this_cpu_rq; in select_task_rq_rt()
1683 rq = cpu_rq(cpu); in select_task_rq_rt()
1686 curr = READ_ONCE(rq->curr); /* unlocked access */ in select_task_rq_rt()
1757 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) in check_preempt_equal_prio() argument
1763 if (rq->curr->nr_cpus_allowed == 1 || in check_preempt_equal_prio()
1764 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) in check_preempt_equal_prio()
1772 cpupri_find(&rq->rd->cpupri, p, NULL)) in check_preempt_equal_prio()
1780 requeue_task_rt(rq, p, 1); in check_preempt_equal_prio()
1781 resched_curr(rq); in check_preempt_equal_prio()
1784 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in balance_rt() argument
1786 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) { in balance_rt()
1795 rq_unpin_lock(rq, rf); in balance_rt()
1796 trace_android_rvh_sched_balance_rt(rq, p, &done); in balance_rt()
1798 pull_rt_task(rq); in balance_rt()
1799 rq_repin_lock(rq, rf); in balance_rt()
1802 return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq); in balance_rt()
1809 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_rt() argument
1811 if (p->prio < rq->curr->prio) { in check_preempt_curr_rt()
1812 resched_curr(rq); in check_preempt_curr_rt()
1829 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) in check_preempt_curr_rt()
1830 check_preempt_equal_prio(rq, p); in check_preempt_curr_rt()
1834 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first) in set_next_task_rt() argument
1837 struct rt_rq *rt_rq = &rq->rt; in set_next_task_rt()
1839 p->se.exec_start = rq_clock_task(rq); in set_next_task_rt()
1844 dequeue_pushable_task(rq, p); in set_next_task_rt()
1854 if (rq->curr->sched_class != &rt_sched_class) in set_next_task_rt()
1855 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0); in set_next_task_rt()
1856 trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 0); in set_next_task_rt()
1858 rt_queue_push_tasks(rq); in set_next_task_rt()
1879 static struct task_struct *_pick_next_task_rt(struct rq *rq) in _pick_next_task_rt() argument
1882 struct rt_rq *rt_rq = &rq->rt; in _pick_next_task_rt()
1894 static struct task_struct *pick_task_rt(struct rq *rq) in pick_task_rt() argument
1898 if (!sched_rt_runnable(rq)) in pick_task_rt()
1901 p = _pick_next_task_rt(rq); in pick_task_rt()
1906 static struct task_struct *pick_next_task_rt(struct rq *rq) in pick_next_task_rt() argument
1908 struct task_struct *p = pick_task_rt(rq); in pick_next_task_rt()
1911 set_next_task_rt(rq, p, true); in pick_next_task_rt()
1916 static void put_prev_task_rt(struct rq *rq, struct task_struct *p) in put_prev_task_rt() argument
1919 struct rt_rq *rt_rq = &rq->rt; in put_prev_task_rt()
1924 update_curr_rt(rq); in put_prev_task_rt()
1926 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1); in put_prev_task_rt()
1927 trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1); in put_prev_task_rt()
1934 enqueue_pushable_task(rq, p); in put_prev_task_rt()
1942 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) in pick_rt_task() argument
1944 if (!task_on_cpu(rq, p) && in pick_rt_task()
1955 struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu) in pick_highest_pushable_task() argument
1957 struct plist_head *head = &rq->rt.pushable_tasks; in pick_highest_pushable_task()
1960 if (!has_pushable_tasks(rq)) in pick_highest_pushable_task()
1964 if (pick_rt_task(rq, p, cpu)) in pick_highest_pushable_task()
2075 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) in find_lock_lowest_rq() argument
2077 struct rq *lowest_rq = NULL; in find_lock_lowest_rq()
2084 if ((cpu == -1) || (cpu == rq->cpu)) in find_lock_lowest_rq()
2100 if (double_lock_balance(rq, lowest_rq)) { in find_lock_lowest_rq()
2110 if (unlikely(task_rq(task) != rq || in find_lock_lowest_rq()
2112 task_on_cpu(rq, task) || in find_lock_lowest_rq()
2117 double_unlock_balance(rq, lowest_rq); in find_lock_lowest_rq()
2128 double_unlock_balance(rq, lowest_rq); in find_lock_lowest_rq()
2135 static struct task_struct *pick_next_pushable_task(struct rq *rq) in pick_next_pushable_task() argument
2139 if (!has_pushable_tasks(rq)) in pick_next_pushable_task()
2142 p = plist_first_entry(&rq->rt.pushable_tasks, in pick_next_pushable_task()
2145 BUG_ON(rq->cpu != task_cpu(p)); in pick_next_pushable_task()
2146 BUG_ON(task_current(rq, p)); in pick_next_pushable_task()
2160 static int push_rt_task(struct rq *rq, bool pull) in push_rt_task() argument
2163 struct rq *lowest_rq; in push_rt_task()
2166 if (!rq->rt.overloaded) in push_rt_task()
2169 next_task = pick_next_pushable_task(rq); in push_rt_task()
2179 if (unlikely(next_task->prio < rq->curr->prio)) { in push_rt_task()
2180 resched_curr(rq); in push_rt_task()
2188 if (!pull || rq->push_busy) in push_rt_task()
2200 if (rq->curr->sched_class != &rt_sched_class) in push_rt_task()
2203 cpu = find_lowest_rq(rq->curr); in push_rt_task()
2204 if (cpu == -1 || cpu == rq->cpu) in push_rt_task()
2213 push_task = get_push_task(rq); in push_rt_task()
2216 raw_spin_rq_unlock(rq); in push_rt_task()
2217 stop_one_cpu_nowait(rq->cpu, push_cpu_stop, in push_rt_task()
2218 push_task, &rq->push_work); in push_rt_task()
2220 raw_spin_rq_lock(rq); in push_rt_task()
2226 if (WARN_ON(next_task == rq->curr)) in push_rt_task()
2233 lowest_rq = find_lock_lowest_rq(next_task, rq); in push_rt_task()
2244 task = pick_next_pushable_task(rq); in push_rt_task()
2267 deactivate_task(rq, next_task, 0); in push_rt_task()
2273 double_unlock_balance(rq, lowest_rq); in push_rt_task()
2280 static void push_rt_tasks(struct rq *rq) in push_rt_tasks() argument
2283 while (push_rt_task(rq, false)) in push_rt_tasks()
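
push_rt_task()/push_rt_tasks() are the push side of RT balancing: while this runqueue is overloaded, take the best pushable task, find a CPU running lower-priority work, and migrate it there. The userspace model below captures that loop only; locking, the retry dance in find_lock_lowest_rq(), and the migration-disabled/stopper path are left out, and struct sim_rq and IDLE_PRIO are invented for the demo:

#include <stdio.h>

#define NR_CPUS   4
#define IDLE_PRIO 140   /* "no RT work" sentinel, worse than any RT prio */

struct sim_rq {
        int curr_prio;          /* prio of the task running on this CPU */
        int pushable[8];        /* waiting RT tasks, sorted by prio     */
        int nr_pushable;
};

/* Pick a CPU that would run a task of this priority immediately. */
static int find_lowest_rq(struct sim_rq *rqs, int prio, int this_cpu)
{
        int cpu, best = -1;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu != this_cpu && rqs[cpu].curr_prio > prio &&
                    (best < 0 || rqs[cpu].curr_prio > rqs[best].curr_prio))
                        best = cpu;
        return best;
}

/* Push one task; returns 1 if a migration happened. */
static int push_rt_task(struct sim_rq *rqs, int this_cpu)
{
        struct sim_rq *rq = &rqs[this_cpu];
        int prio, cpu, i;

        if (!rq->nr_pushable)
                return 0;

        prio = rq->pushable[0];                 /* best waiting task */
        cpu = find_lowest_rq(rqs, prio, this_cpu);
        if (cpu < 0)
                return 0;                       /* nowhere better to run */

        for (i = 1; i < rq->nr_pushable; i++)   /* dequeue it locally */
                rq->pushable[i - 1] = rq->pushable[i];
        rq->nr_pushable--;

        rqs[cpu].curr_prio = prio;              /* "activate" it remotely */
        printf("pushed prio %d task from CPU %d to CPU %d\n",
               prio, this_cpu, cpu);
        return 1;
}

static void push_rt_tasks(struct sim_rq *rqs, int this_cpu)
{
        while (push_rt_task(rqs, this_cpu))
                ;
}

int main(void)
{
        struct sim_rq rqs[NR_CPUS] = {
                { .curr_prio = 10, .pushable = { 20, 30 }, .nr_pushable = 2 },
                { .curr_prio = IDLE_PRIO },
                { .curr_prio = 25 },
                { .curr_prio = 15 },
        };

        push_rt_tasks(rqs, 0);
        return 0;
}
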
2390 static void tell_cpu_to_push(struct rq *rq) in tell_cpu_to_push() argument
2395 atomic_inc(&rq->rd->rto_loop_next); in tell_cpu_to_push()
2398 if (!rto_start_trylock(&rq->rd->rto_loop_start)) in tell_cpu_to_push()
2401 raw_spin_lock(&rq->rd->rto_lock); in tell_cpu_to_push()
2409 if (rq->rd->rto_cpu < 0) in tell_cpu_to_push()
2410 cpu = rto_next_cpu(rq->rd); in tell_cpu_to_push()
2412 raw_spin_unlock(&rq->rd->rto_lock); in tell_cpu_to_push()
2414 rto_start_unlock(&rq->rd->rto_loop_start); in tell_cpu_to_push()
2418 sched_get_rd(rq->rd); in tell_cpu_to_push()
2419 irq_work_queue_on(&rq->rd->rto_push_work, cpu); in tell_cpu_to_push()
2428 struct rq *rq; in rto_push_irq_work_func() local
2431 rq = this_rq(); in rto_push_irq_work_func()
2437 if (has_pushable_tasks(rq)) { in rto_push_irq_work_func()
2438 raw_spin_rq_lock(rq); in rto_push_irq_work_func()
2439 while (push_rt_task(rq, true)) in rto_push_irq_work_func()
2441 raw_spin_rq_unlock(rq); in rto_push_irq_work_func()
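
tell_cpu_to_push() and rto_push_irq_work_func() replace a global pull with an IPI walk over the overloaded CPUs, and the rto_loop/rto_loop_next counters restart the walk if new overload shows up while it is in flight. A single-threaded model of that walk, under the assumption that one caller drives it; the real code runs rto_next_cpu() under rto_lock and hands off between CPUs via irq_work:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct root_domain {
        bool rto_mask[NR_CPUS]; /* CPUs with more than one queued RT task */
        int  rto_cpu;           /* walk position, -1 when no walk running */
        int  rto_loop;          /* generation the current walk is serving */
        int  rto_loop_next;     /* bumped whenever new overload shows up  */
};

/* Next overloaded CPU to poke, or -1 once the walk is finished. */
static int rto_next_cpu(struct root_domain *rd)
{
        for (;;) {
                int cpu;

                for (cpu = rd->rto_cpu + 1; cpu < NR_CPUS; cpu++) {
                        if (rd->rto_mask[cpu]) {
                                rd->rto_cpu = cpu;
                                return cpu;
                        }
                }
                rd->rto_cpu = -1;

                /* New overload arrived mid-walk: do another pass. */
                if (rd->rto_loop == rd->rto_loop_next)
                        return -1;
                rd->rto_loop = rd->rto_loop_next;
        }
}

int main(void)
{
        struct root_domain rd = { .rto_cpu = -1 };
        int cpu;

        rd.rto_mask[1] = rd.rto_mask[3] = true;
        rd.rto_loop_next++;                     /* tell_cpu_to_push() */

        while ((cpu = rto_next_cpu(&rd)) >= 0) {
                printf("ask CPU %d to push its extra RT tasks\n", cpu);
                rd.rto_mask[cpu] = false;       /* pretend it pushed them */
        }
        return 0;
}
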
2461 static void pull_rt_task(struct rq *this_rq) in pull_rt_task()
2466 struct rq *src_rq; in pull_rt_task()
2576 static void task_woken_rt(struct rq *rq, struct task_struct *p) in task_woken_rt() argument
2578 bool need_to_push = !task_on_cpu(rq, p) && in task_woken_rt()
2579 !test_tsk_need_resched(rq->curr) && in task_woken_rt()
2581 (dl_task(rq->curr) || rt_task(rq->curr)) && in task_woken_rt()
2582 (rq->curr->nr_cpus_allowed < 2 || in task_woken_rt()
2583 rq->curr->prio <= p->prio); in task_woken_rt()
2586 push_rt_tasks(rq); in task_woken_rt()
2590 static void rq_online_rt(struct rq *rq) in rq_online_rt() argument
2592 if (rq->rt.overloaded) in rq_online_rt()
2593 rt_set_overload(rq); in rq_online_rt()
2595 __enable_runtime(rq); in rq_online_rt()
2597 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); in rq_online_rt()
2601 static void rq_offline_rt(struct rq *rq) in rq_offline_rt() argument
2603 if (rq->rt.overloaded) in rq_offline_rt()
2604 rt_clear_overload(rq); in rq_offline_rt()
2606 __disable_runtime(rq); in rq_offline_rt()
2608 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID); in rq_offline_rt()
2615 static void switched_from_rt(struct rq *rq, struct task_struct *p) in switched_from_rt() argument
2624 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running) in switched_from_rt()
2627 rt_queue_pull_task(rq); in switched_from_rt()
2646 static void switched_to_rt(struct rq *rq, struct task_struct *p) in switched_to_rt() argument
2652 if (task_current(rq, p)) { in switched_to_rt()
2653 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0); in switched_to_rt()
2664 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) in switched_to_rt()
2665 rt_queue_push_tasks(rq); in switched_to_rt()
2667 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) in switched_to_rt()
2668 resched_curr(rq); in switched_to_rt()
2677 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_rt() argument
2682 if (task_current(rq, p)) { in prio_changed_rt()
2689 rt_queue_pull_task(rq); in prio_changed_rt()
2695 if (p->prio > rq->rt.highest_prio.curr) in prio_changed_rt()
2696 resched_curr(rq); in prio_changed_rt()
2700 resched_curr(rq); in prio_changed_rt()
2708 if (p->prio < rq->curr->prio) in prio_changed_rt()
2709 resched_curr(rq); in prio_changed_rt()
2714 static void watchdog(struct rq *rq, struct task_struct *p) in watchdog() argument
2738 static inline void watchdog(struct rq *rq, struct task_struct *p) { } in watchdog() argument
2749 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) in task_tick_rt() argument
2753 update_curr_rt(rq); in task_tick_rt()
2754 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1); in task_tick_rt()
2755 trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1); in task_tick_rt()
2757 watchdog(rq, p); in task_tick_rt()
2777 requeue_task_rt(rq, p, 0); in task_tick_rt()
2778 resched_curr(rq); in task_tick_rt()
2784 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task) in get_rr_interval_rt() argument