Lines Matching refs:rq

357 struct rq *rq = rq_of(cfs_rq); in list_add_leaf_cfs_rq() local
358 int cpu = cpu_of(rq); in list_add_leaf_cfs_rq()
361 return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
389 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
399 &rq->leaf_cfs_rq_list); in list_add_leaf_cfs_rq()
404 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
414 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch); in list_add_leaf_cfs_rq()
419 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
426 struct rq *rq = rq_of(cfs_rq); in list_del_leaf_cfs_rq() local
435 if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list) in list_del_leaf_cfs_rq()
436 rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev; in list_del_leaf_cfs_rq()
443 static inline void assert_list_leaf_cfs_rq(struct rq *rq) in assert_list_leaf_cfs_rq() argument
445 SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list); in assert_list_leaf_cfs_rq()
449 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ argument
450 list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
531 static inline void assert_list_leaf_cfs_rq(struct rq *rq) in assert_list_leaf_cfs_rq() argument
535 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ argument
536 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
934 static void update_curr_fair(struct rq *rq) in update_curr_fair() argument
936 update_curr(cfs_rq_of(&rq->curr->se)); in update_curr_fair()
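
The two fragments above (934/936) are effectively the whole helper; a minimal sketch reconstructed from just the listed lines:

    static void update_curr_fair(struct rq *rq)
    {
            update_curr(cfs_rq_of(&rq->curr->se));
    }

It simply charges runtime to the cfs_rq that currently owns rq->curr's sched_entity.
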
1235 static void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue() argument
1237 rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE); in account_numa_enqueue()
1238 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p)); in account_numa_enqueue()
1241 static void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue() argument
1243 rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE); in account_numa_dequeue()
1244 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); in account_numa_dequeue()
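
The body lines at 1237/1238 and 1243/1244 give both NUMA accounting helpers in full; reconstructed as a sketch for readability:

    static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
    {
            rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
            rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
    }

    static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
    {
            rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
            rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
    }

nr_numa_running counts queued tasks that have a preferred node at all; nr_preferred_running counts those already running on it.
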
1737 static unsigned long cpu_load(struct rq *rq);
1738 static unsigned long cpu_runnable(struct rq *rq);
1799 struct rq *rq = cpu_rq(cpu); in update_numa_stats() local
1801 ns->load += cpu_load(rq); in update_numa_stats()
1802 ns->runnable += cpu_runnable(rq); in update_numa_stats()
1804 ns->nr_running += rq->cfs.h_nr_running; in update_numa_stats()
1807 if (find_idle && !rq->nr_running && idle_cpu(cpu)) { in update_numa_stats()
1808 if (READ_ONCE(rq->numa_migrate_on) || in update_numa_stats()
1831 struct rq *rq = cpu_rq(env->dst_cpu); in task_numa_assign() local
1834 if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) { in task_numa_assign()
1846 rq = cpu_rq(env->dst_cpu); in task_numa_assign()
1847 if (!xchg(&rq->numa_migrate_on, 1)) in task_numa_assign()
1861 rq = cpu_rq(env->best_cpu); in task_numa_assign()
1862 WRITE_ONCE(rq->numa_migrate_on, 0); in task_numa_assign()
1920 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
2180 struct rq *best_rq; in task_numa_migrate()
3131 static void task_tick_numa(struct rq *rq, struct task_struct *curr) in task_tick_numa() argument
3196 static void task_tick_numa(struct rq *rq, struct task_struct *curr) in task_tick_numa() argument
3200 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue() argument
3204 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue() argument
3220 struct rq *rq = rq_of(cfs_rq); in account_entity_enqueue() local
3222 account_numa_enqueue(rq, task_of(se)); in account_entity_enqueue()
3223 list_add(&se->group_node, &rq->cfs_tasks); in account_entity_enqueue()
3504 struct rq *rq = rq_of(cfs_rq); in cfs_rq_util_change() local
3506 if (&rq->cfs == cfs_rq) { in cfs_rq_util_change()
3521 cpufreq_update_util(rq, flags); in cfs_rq_util_change()
3571 struct rq *rq = rq_of(cfs_rq); in child_cfs_rq_on_list() local
3573 prev = rq->tmp_alone_branch; in child_cfs_rq_on_list()
3935 struct rq *rq; in migrate_se_pelt_lag() local
3942 rq = rq_of(cfs_rq); in migrate_se_pelt_lag()
3945 is_idle = is_idle_task(rcu_dereference(rq->curr)); in migrate_se_pelt_lag()
3987 now = u64_u32_load(rq->clock_pelt_idle); in migrate_se_pelt_lag()
4005 now += sched_clock_cpu(cpu_of(rq)) - u64_u32_load(rq->clock_idle); in migrate_se_pelt_lag()
4284 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
4577 inline void update_misfit_status(struct task_struct *p, struct rq *rq) in update_misfit_status() argument
4581 trace_android_rvh_update_misfit_status(p, rq, &need_update); in update_misfit_status()
4586 rq->misfit_task_load = 0; in update_misfit_status()
4590 if (task_fits_cpu(p, cpu_of(rq))) { in update_misfit_status()
4591 rq->misfit_task_load = 0; in update_misfit_status()
4599 rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1); in update_misfit_status()
4627 static inline int newidle_balance(struct rq *rq, struct rq_flags *rf) in newidle_balance() argument
4641 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {} in update_misfit_status() argument
5331 struct rq *rq = data; in tg_unthrottle_up() local
5332 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up()
5336 cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) - in tg_unthrottle_up()
5349 struct rq *rq = data; in tg_throttle_down() local
5350 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down()
5354 cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq); in tg_throttle_down()
5364 struct rq *rq = rq_of(cfs_rq); in throttle_cfs_rq() local
5394 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); in throttle_cfs_rq()
5437 sub_nr_running(rq, task_delta); in throttle_cfs_rq()
5445 cfs_rq->throttled_clock = rq_clock(rq); in throttle_cfs_rq()
5451 struct rq *rq = rq_of(cfs_rq); in unthrottle_cfs_rq() local
5456 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
5460 update_rq_clock(rq); in unthrottle_cfs_rq()
5463 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; in unthrottle_cfs_rq()
5468 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); in unthrottle_cfs_rq()
5522 add_nr_running(rq, task_delta); in unthrottle_cfs_rq()
5525 assert_list_leaf_cfs_rq(rq); in unthrottle_cfs_rq()
5528 if (rq->curr == rq->idle && rq->cfs.nr_running) in unthrottle_cfs_rq()
5529 resched_curr(rq); in unthrottle_cfs_rq()
5540 struct rq *rq = rq_of(cfs_rq); in distribute_cfs_runtime() local
5543 rq_lock_irqsave(rq, &rf); in distribute_cfs_runtime()
5565 rq_unlock_irqrestore(rq, &rf); in distribute_cfs_runtime()
5927 static void __maybe_unused update_runtime_enabled(struct rq *rq) in update_runtime_enabled() argument
5931 lockdep_assert_rq_held(rq); in update_runtime_enabled()
5936 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in update_runtime_enabled()
5946 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) in unthrottle_offline_cfs_rqs() argument
5950 lockdep_assert_rq_held(rq); in unthrottle_offline_cfs_rqs()
5954 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in unthrottle_offline_cfs_rqs()
6016 static inline void update_runtime_enabled(struct rq *rq) {} in update_runtime_enabled() argument
6017 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} in unthrottle_offline_cfs_rqs() argument
6026 static void hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair() argument
6031 SCHED_WARN_ON(task_rq(p) != rq); in hrtick_start_fair()
6033 if (rq->cfs.h_nr_running > 1) { in hrtick_start_fair()
6039 if (task_current(rq, p)) in hrtick_start_fair()
6040 resched_curr(rq); in hrtick_start_fair()
6043 hrtick_start(rq, delta); in hrtick_start_fair()
6052 static void hrtick_update(struct rq *rq) in hrtick_update() argument
6054 struct task_struct *curr = rq->curr; in hrtick_update()
6056 if (!hrtick_enabled_fair(rq) || curr->sched_class != &fair_sched_class) in hrtick_update()
6060 hrtick_start_fair(rq, curr); in hrtick_update()
6064 hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair() argument
6068 static inline void hrtick_update(struct rq *rq) in hrtick_update() argument
6088 static inline void update_overutilized_status(struct rq *rq) in update_overutilized_status() argument
6090 if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) { in update_overutilized_status()
6091 WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED); in update_overutilized_status()
6092 trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED); in update_overutilized_status()
6096 static inline void update_overutilized_status(struct rq *rq) { } in update_overutilized_status() argument
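
Lines 6088-6092 contain the whole body of the overutilization hook; only the closing braces are filled in for this sketch. The 6096 entry is the no-op stub used when this tracking is compiled out.

    static inline void update_overutilized_status(struct rq *rq)
    {
            if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
                    WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
                    trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
            }
    }
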
6100 static int sched_idle_rq(struct rq *rq) in sched_idle_rq() argument
6102 return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running && in sched_idle_rq()
6103 rq->nr_running); in sched_idle_rq()
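
Lines 6102/6103 are the complete predicate; a sketch with only the braces added:

    static int sched_idle_rq(struct rq *rq)
    {
            return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
                            rq->nr_running);
    }

It reports a runqueue whose runnable load is non-empty and consists entirely of SCHED_IDLE tasks.
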
6130 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_fair() argument
6144 util_est_enqueue(&rq->cfs, p); in enqueue_task_fair()
6152 trace_android_rvh_set_iowait(p, rq, &should_iowait_boost); in enqueue_task_fair()
6154 cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT); in enqueue_task_fair()
6175 trace_android_rvh_enqueue_task_fair(rq, p, flags); in enqueue_task_fair()
6195 add_nr_running(rq, 1); in enqueue_task_fair()
6212 update_overutilized_status(rq); in enqueue_task_fair()
6215 assert_list_leaf_cfs_rq(rq); in enqueue_task_fair()
6217 hrtick_update(rq); in enqueue_task_fair()
6227 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_fair() argument
6233 bool was_sched_idle = sched_idle_rq(rq); in dequeue_task_fair()
6235 util_est_dequeue(&rq->cfs, p); in dequeue_task_fair()
6266 trace_android_rvh_dequeue_task_fair(rq, p, flags); in dequeue_task_fair()
6287 sub_nr_running(rq, 1); in dequeue_task_fair()
6290 if (unlikely(!was_sched_idle && sched_idle_rq(rq))) in dequeue_task_fair()
6291 rq->next_balance = jiffies; in dequeue_task_fair()
6294 util_est_update(&rq->cfs, p, task_sleep); in dequeue_task_fair()
6295 hrtick_update(rq); in dequeue_task_fair()
6317 static unsigned long cpu_load(struct rq *rq) in cpu_load() argument
6319 return cfs_rq_load_avg(&rq->cfs); in cpu_load()
6335 static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p) in cpu_load_without() argument
6341 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
6342 return cpu_load(rq); in cpu_load_without()
6344 cfs_rq = &rq->cfs; in cpu_load_without()
6353 static unsigned long cpu_runnable(struct rq *rq) in cpu_runnable() argument
6355 return cfs_rq_runnable_avg(&rq->cfs); in cpu_runnable()
6358 static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p) in cpu_runnable_without() argument
6364 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
6365 return cpu_runnable(rq); in cpu_runnable_without()
6367 cfs_rq = &rq->cfs; in cpu_runnable_without()
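
The cpu_load()/cpu_runnable() accessors at 6319 and 6355 are one-liners, sketched here for context. The *_without() variants are only partially listed: per the guards at 6341/6364 they fall back to these plain readings when the task is not on that CPU or has never been attached, and the code that subtracts the task's own contribution is not shown.

    static unsigned long cpu_load(struct rq *rq)
    {
            return cfs_rq_load_avg(&rq->cfs);
    }

    static unsigned long cpu_runnable(struct rq *rq)
    {
            return cfs_rq_runnable_avg(&rq->cfs);
    }
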
6552 struct rq *rq = cpu_rq(i); in find_idlest_group_cpu() local
6554 if (!sched_core_cookie_match(rq, p)) in find_idlest_group_cpu()
6561 struct cpuidle_state *idle = idle_get_state(rq); in find_idlest_group_cpu()
6569 latest_idle_timestamp = rq->idle_stamp; in find_idlest_group_cpu()
6572 rq->idle_stamp > latest_idle_timestamp) { in find_idlest_group_cpu()
6578 latest_idle_timestamp = rq->idle_stamp; in find_idlest_group_cpu()
6686 void __update_idle_core(struct rq *rq) in __update_idle_core() argument
6688 int core = cpu_of(rq); in __update_idle_core()
6791 struct rq *this_rq = this_rq(); in select_idle_cpu()
7371 struct rq *rq = cpu_rq(cpu); in find_energy_efficient_cpu() local
7391 if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) { in find_energy_efficient_cpu()
7399 rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN); in find_energy_efficient_cpu()
7400 rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX); in find_energy_efficient_cpu()
7632 balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_fair() argument
7634 if (rq->nr_running) in balance_fair()
7637 return newidle_balance(rq, rf) != 0; in balance_fair()
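
Only the guard and the final return of balance_fair() appear in the listing (7634/7637); the early-return value in this sketch is an assumption:

    static int
    balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
    {
            if (rq->nr_running)
                    return 1;       /* assumed: not among the listed lines */

            return newidle_balance(rq, rf) != 0;
    }
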
7721 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) in check_preempt_wakeup() argument
7723 struct task_struct *curr = rq->curr; in check_preempt_wakeup()
7793 trace_android_rvh_check_preempt_wakeup(rq, p, &preempt, &ignore, in check_preempt_wakeup()
7813 resched_curr(rq); in check_preempt_wakeup()
7823 if (unlikely(!se->on_rq || curr == rq->idle)) in check_preempt_wakeup()
7831 static struct task_struct *pick_task_fair(struct rq *rq) in pick_task_fair() argument
7837 cfs_rq = &rq->cfs; in pick_task_fair()
7864 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in pick_next_task_fair() argument
7866 struct cfs_rq *cfs_rq = &rq->cfs; in pick_next_task_fair()
7873 if (!sched_fair_runnable(rq)) in pick_next_task_fair()
7910 cfs_rq = &rq->cfs; in pick_next_task_fair()
7924 trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, false, prev); in pick_next_task_fair()
7955 put_prev_task(rq, prev); in pick_next_task_fair()
7957 trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, true, prev); in pick_next_task_fair()
7976 list_move(&p->se.group_node, &rq->cfs_tasks); in pick_next_task_fair()
7979 if (hrtick_enabled_fair(rq)) in pick_next_task_fair()
7980 hrtick_start_fair(rq, p); in pick_next_task_fair()
7982 update_misfit_status(p, rq); in pick_next_task_fair()
7990 new_tasks = newidle_balance(rq, rf); in pick_next_task_fair()
8007 update_idle_rq_clock_pelt(rq); in pick_next_task_fair()
8012 static struct task_struct *__pick_next_task_fair(struct rq *rq) in __pick_next_task_fair() argument
8014 return pick_next_task_fair(rq, NULL, NULL); in __pick_next_task_fair()
8020 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) in put_prev_task_fair() argument
8036 static void yield_task_fair(struct rq *rq) in yield_task_fair() argument
8038 struct task_struct *curr = rq->curr; in yield_task_fair()
8045 if (unlikely(rq->nr_running == 1)) in yield_task_fair()
8051 update_rq_clock(rq); in yield_task_fair()
8061 rq_clock_skip_update(rq); in yield_task_fair()
8067 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p) in yield_to_task_fair() argument
8078 yield_task_fair(rq); in yield_to_task_fair()
8261 struct rq *src_rq;
8265 struct rq *dst_rq;
8687 static void attach_task(struct rq *rq, struct task_struct *p) in attach_task() argument
8689 lockdep_assert_rq_held(rq); in attach_task()
8691 WARN_ON_ONCE(task_rq(p) != rq); in attach_task()
8692 activate_task(rq, p, ENQUEUE_NOCLOCK); in attach_task()
8693 check_preempt_curr(rq, p, 0); in attach_task()
8700 static void attach_one_task(struct rq *rq, struct task_struct *p) in attach_one_task() argument
8704 rq_lock(rq, &rf); in attach_one_task()
8705 update_rq_clock(rq); in attach_one_task()
8706 attach_task(rq, p); in attach_one_task()
8707 rq_unlock(rq, &rf); in attach_one_task()
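
Lines 8689-8693 and 8704-8707 give both attach helpers nearly verbatim; only the rq_flags local is assumed in this sketch:

    static void attach_task(struct rq *rq, struct task_struct *p)
    {
            lockdep_assert_rq_held(rq);

            WARN_ON_ONCE(task_rq(p) != rq);
            activate_task(rq, p, ENQUEUE_NOCLOCK);
            check_preempt_curr(rq, p, 0);
    }

    static void attach_one_task(struct rq *rq, struct task_struct *p)
    {
            struct rq_flags rf;     /* assumed local; not among the listed lines */

            rq_lock(rq, &rf);
            update_rq_clock(rq);
            attach_task(rq, p);
            rq_unlock(rq, &rf);
    }
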
8745 static inline bool others_have_blocked(struct rq *rq) in others_have_blocked() argument
8747 if (READ_ONCE(rq->avg_rt.util_avg)) in others_have_blocked()
8750 if (READ_ONCE(rq->avg_dl.util_avg)) in others_have_blocked()
8753 if (thermal_load_avg(rq)) in others_have_blocked()
8757 if (READ_ONCE(rq->avg_irq.util_avg)) in others_have_blocked()
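
The checks at 8747-8757 outline others_have_blocked(); the return statements (and the config guard around the IRQ check) do not appear in the listing and are filled in as assumptions:

    static inline bool others_have_blocked(struct rq *rq)
    {
            if (READ_ONCE(rq->avg_rt.util_avg))
                    return true;    /* assumed */

            if (READ_ONCE(rq->avg_dl.util_avg))
                    return true;    /* assumed */

            if (thermal_load_avg(rq))
                    return true;    /* assumed */

            /* in the source, the IRQ check sits behind an IRQ-accounting config option */
            if (READ_ONCE(rq->avg_irq.util_avg))
                    return true;    /* assumed */

            return false;           /* assumed */
    }
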
8764 static inline void update_blocked_load_tick(struct rq *rq) in update_blocked_load_tick() argument
8766 WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies); in update_blocked_load_tick()
8769 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) in update_blocked_load_status() argument
8772 rq->has_blocked_load = 0; in update_blocked_load_status()
8776 static inline bool others_have_blocked(struct rq *rq) { return false; } in others_have_blocked() argument
8777 static inline void update_blocked_load_tick(struct rq *rq) {} in update_blocked_load_tick() argument
8778 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {} in update_blocked_load_status() argument
8781 static bool __update_blocked_others(struct rq *rq, bool *done) in __update_blocked_others() argument
8784 u64 now = rq_clock_pelt(rq); in __update_blocked_others()
8792 curr_class = rq->curr->sched_class; in __update_blocked_others()
8794 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); in __update_blocked_others()
8796 decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) | in __update_blocked_others()
8797 update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) | in __update_blocked_others()
8798 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) | in __update_blocked_others()
8799 update_irq_load_avg(rq, 0); in __update_blocked_others()
8801 if (others_have_blocked(rq)) in __update_blocked_others()
8809 static bool __update_blocked_fair(struct rq *rq, bool *done) in __update_blocked_fair() argument
8813 int cpu = cpu_of(rq); in __update_blocked_fair()
8815 trace_android_rvh_update_blocked_fair(rq); in __update_blocked_fair()
8821 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { in __update_blocked_fair()
8830 if (cfs_rq == &rq->cfs) in __update_blocked_fair()
8861 struct rq *rq = rq_of(cfs_rq); in update_cfs_rq_h_load() local
8862 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load()
8901 static bool __update_blocked_fair(struct rq *rq, bool *done) in __update_blocked_fair() argument
8903 struct cfs_rq *cfs_rq = &rq->cfs; in __update_blocked_fair()
8922 struct rq *rq = cpu_rq(cpu); in update_blocked_averages() local
8925 rq_lock_irqsave(rq, &rf); in update_blocked_averages()
8926 update_blocked_load_tick(rq); in update_blocked_averages()
8927 update_rq_clock(rq); in update_blocked_averages()
8929 decayed |= __update_blocked_others(rq, &done); in update_blocked_averages()
8930 decayed |= __update_blocked_fair(rq, &done); in update_blocked_averages()
8932 update_blocked_load_status(rq, !done); in update_blocked_averages()
8934 cpufreq_update_util(rq, 0); in update_blocked_averages()
8935 rq_unlock_irqrestore(rq, &rf); in update_blocked_averages()
9001 struct rq *rq = cpu_rq(cpu); in scale_rt_capacity() local
9006 irq = cpu_util_irq(rq); in scale_rt_capacity()
9017 used = READ_ONCE(rq->avg_rt.util_avg); in scale_rt_capacity()
9018 used += READ_ONCE(rq->avg_dl.util_avg); in scale_rt_capacity()
9019 used += thermal_load_avg(rq); in scale_rt_capacity()
9109 check_cpu_capacity(struct rq *rq, struct sched_domain *sd) in check_cpu_capacity() argument
9111 return ((rq->cpu_capacity * sd->imbalance_pct) < in check_cpu_capacity()
9112 (rq->cpu_capacity_orig * 100)); in check_cpu_capacity()
9120 static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd) in check_misfit_status() argument
9122 return rq->misfit_task_load && in check_misfit_status()
9123 (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity || in check_misfit_status()
9124 check_cpu_capacity(rq, sd)); in check_misfit_status()
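
Lines 9111/9112 and 9122-9124 give both capacity predicates in full; reconstructed as a sketch:

    static inline int
    check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
    {
            return ((rq->cpu_capacity * sd->imbalance_pct) <
                    (rq->cpu_capacity_orig * 100));
    }

    static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
    {
            return rq->misfit_task_load &&
                   (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
                    check_cpu_capacity(rq, sd));
    }

The first check fires when the capacity left for CFS drops below cpu_capacity_orig * 100 / sd->imbalance_pct; the second additionally requires a misfit task and either a bigger CPU elsewhere or that reduced capacity.
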
9334 sched_reduced_capacity(struct rq *rq, struct sched_domain *sd) in sched_reduced_capacity() argument
9340 if (rq->cfs.h_nr_running != 1) in sched_reduced_capacity()
9343 return check_cpu_capacity(rq, sd); in sched_reduced_capacity()
9367 struct rq *rq = cpu_rq(i); in update_sg_lb_stats() local
9368 unsigned long load = cpu_load(rq); in update_sg_lb_stats()
9372 sgs->group_runnable += cpu_runnable(rq); in update_sg_lb_stats()
9373 sgs->sum_h_nr_running += rq->cfs.h_nr_running; in update_sg_lb_stats()
9375 nr_running = rq->nr_running; in update_sg_lb_stats()
9385 sgs->nr_numa_running += rq->nr_numa_running; in update_sg_lb_stats()
9386 sgs->nr_preferred_running += rq->nr_preferred_running; in update_sg_lb_stats()
9402 if (sgs->group_misfit_task_load < rq->misfit_task_load) { in update_sg_lb_stats()
9403 sgs->group_misfit_task_load = rq->misfit_task_load; in update_sg_lb_stats()
9407 sched_reduced_capacity(rq, env->sd)) { in update_sg_lb_stats()
9565 static inline enum fbq_type fbq_classify_rq(struct rq *rq) in fbq_classify_rq() argument
9567 if (rq->nr_running > rq->nr_numa_running) in fbq_classify_rq()
9569 if (rq->nr_running > rq->nr_preferred_running) in fbq_classify_rq()
9579 static inline enum fbq_type fbq_classify_rq(struct rq *rq) in fbq_classify_rq() argument
9613 struct rq *rq = cpu_rq(cpu); in idle_cpu_without() local
9615 if (rq->curr != rq->idle && rq->curr != p) in idle_cpu_without()
9625 if (rq->ttwu_pending) in idle_cpu_without()
9653 struct rq *rq = cpu_rq(i); in update_sg_wakeup_stats() local
9656 sgs->group_load += cpu_load_without(rq, p); in update_sg_wakeup_stats()
9658 sgs->group_runnable += cpu_runnable_without(rq, p); in update_sg_wakeup_stats()
9660 sgs->sum_h_nr_running += rq->cfs.h_nr_running - local; in update_sg_wakeup_stats()
9662 nr_running = rq->nr_running - local; in update_sg_wakeup_stats()
10399 static struct rq *find_busiest_queue(struct lb_env *env, in find_busiest_queue()
10402 struct rq *busiest = NULL, *rq; in find_busiest_queue() local
10417 rq = cpu_rq(i); in find_busiest_queue()
10418 rt = fbq_classify_rq(rq); in find_busiest_queue()
10442 nr_running = rq->cfs.h_nr_running; in find_busiest_queue()
10471 load = cpu_load(rq); in find_busiest_queue()
10474 !check_cpu_capacity(rq, env->sd)) in find_busiest_queue()
10493 busiest = rq; in find_busiest_queue()
10510 busiest = rq; in find_busiest_queue()
10517 busiest = rq; in find_busiest_queue()
10526 if (rq->misfit_task_load > busiest_load) { in find_busiest_queue()
10527 busiest_load = rq->misfit_task_load; in find_busiest_queue()
10528 busiest = rq; in find_busiest_queue()
10647 static int load_balance(int this_cpu, struct rq *this_rq, in load_balance()
10654 struct rq *busiest; in load_balance()
10961 struct rq *busiest_rq = data; in active_load_balance_cpu_stop()
10964 struct rq *target_rq = cpu_rq(target_cpu); in active_load_balance_cpu_stop()
11079 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) in rebalance_domains() argument
11082 int cpu = rq->cpu; in rebalance_domains()
11092 trace_android_rvh_sched_rebalance_domains(rq, &continue_balancing); in rebalance_domains()
11125 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { in rebalance_domains()
11150 rq->max_idle_balance_cost = in rebalance_domains()
11161 rq->next_balance = next_balance; in rebalance_domains()
11165 static inline int on_null_domain(struct rq *rq) in on_null_domain() argument
11167 return unlikely(!rcu_dereference_sched(rq->sd)); in on_null_domain()
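
Line 11167 is the whole predicate; sketched with its signature for context:

    static inline int on_null_domain(struct rq *rq)
    {
            return unlikely(!rcu_dereference_sched(rq->sd));
    }
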
11243 static void nohz_balancer_kick(struct rq *rq) in nohz_balancer_kick() argument
11248 int nr_busy, i, cpu = rq->cpu; in nohz_balancer_kick()
11252 if (unlikely(rq->idle_balance)) in nohz_balancer_kick()
11259 nohz_balance_exit_idle(rq); in nohz_balancer_kick()
11275 trace_android_rvh_sched_nohz_balancer_kick(rq, &flags, &done); in nohz_balancer_kick()
11279 if (rq->nr_running >= 2) { in nohz_balancer_kick()
11286 sd = rcu_dereference(rq->sd); in nohz_balancer_kick()
11293 if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) { in nohz_balancer_kick()
11320 if (check_misfit_status(rq, sd)) { in nohz_balancer_kick()
11378 void nohz_balance_exit_idle(struct rq *rq) in nohz_balance_exit_idle() argument
11380 SCHED_WARN_ON(rq != this_rq()); in nohz_balance_exit_idle()
11382 if (likely(!rq->nohz_tick_stopped)) in nohz_balance_exit_idle()
11385 rq->nohz_tick_stopped = 0; in nohz_balance_exit_idle()
11386 cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask); in nohz_balance_exit_idle()
11389 set_cpu_sd_state_busy(rq->cpu); in nohz_balance_exit_idle()
11414 struct rq *rq = cpu_rq(cpu); in nohz_balance_enter_idle() local
11431 rq->has_blocked_load = 1; in nohz_balance_enter_idle()
11439 if (rq->nohz_tick_stopped) in nohz_balance_enter_idle()
11443 if (on_null_domain(rq)) in nohz_balance_enter_idle()
11446 rq->nohz_tick_stopped = 1; in nohz_balance_enter_idle()
11469 static bool update_nohz_stats(struct rq *rq) in update_nohz_stats() argument
11471 unsigned int cpu = rq->cpu; in update_nohz_stats()
11473 if (!rq->has_blocked_load) in update_nohz_stats()
11479 if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick))) in update_nohz_stats()
11484 return rq->has_blocked_load; in update_nohz_stats()
11492 static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags) in _nohz_idle_balance()
11501 struct rq *rq; in _nohz_idle_balance() local
11547 rq = cpu_rq(balance_cpu); in _nohz_idle_balance()
11550 has_blocked_load |= update_nohz_stats(rq); in _nohz_idle_balance()
11556 if (time_after_eq(jiffies, rq->next_balance)) { in _nohz_idle_balance()
11559 rq_lock_irqsave(rq, &rf); in _nohz_idle_balance()
11560 update_rq_clock(rq); in _nohz_idle_balance()
11561 rq_unlock_irqrestore(rq, &rf); in _nohz_idle_balance()
11564 rebalance_domains(rq, CPU_IDLE); in _nohz_idle_balance()
11567 if (time_after(next_balance, rq->next_balance)) { in _nohz_idle_balance()
11568 next_balance = rq->next_balance; in _nohz_idle_balance()
11595 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) in nohz_idle_balance()
11630 static void nohz_newidle_balance(struct rq *this_rq) in nohz_newidle_balance()
11658 static inline void nohz_balancer_kick(struct rq *rq) { } in nohz_balancer_kick() argument
11660 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) in nohz_idle_balance()
11665 static inline void nohz_newidle_balance(struct rq *this_rq) { } in nohz_newidle_balance()
11677 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf) in newidle_balance()
11810 struct rq *this_rq = this_rq(); in run_rebalance_domains()
11833 void trigger_load_balance(struct rq *rq) in trigger_load_balance() argument
11839 if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq)))) in trigger_load_balance()
11842 if (time_after_eq(jiffies, rq->next_balance)) in trigger_load_balance()
11845 nohz_balancer_kick(rq); in trigger_load_balance()
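
Lines 11839-11845 outline trigger_load_balance(); the early return and the action taken when next_balance has expired are not listed, so they are marked as assumptions in this sketch:

    void trigger_load_balance(struct rq *rq)
    {
            /* No balancing while attached to the NULL domain or on an inactive CPU. */
            if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq))))
                    return;         /* assumed */

            if (time_after_eq(jiffies, rq->next_balance))
                    raise_softirq(SCHED_SOFTIRQ);   /* assumed: kick the rebalance softirq */

            nohz_balancer_kick(rq);
    }
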
11848 static void rq_online_fair(struct rq *rq) in rq_online_fair() argument
11852 update_runtime_enabled(rq); in rq_online_fair()
11855 static void rq_offline_fair(struct rq *rq) in rq_offline_fair() argument
11860 unthrottle_offline_cfs_rqs(rq); in rq_offline_fair()
11876 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) in task_tick_core() argument
11878 if (!sched_core_enabled(rq)) in task_tick_core()
11895 if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 && in task_tick_core()
11897 resched_curr(rq); in task_tick_core()
11918 void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi) in task_vruntime_update() argument
11925 se_fi_update(se, rq->core->core_forceidle_seq, in_fi); in task_vruntime_update()
11930 struct rq *rq = task_rq(a); in cfs_prio_less() local
11937 SCHED_WARN_ON(task_rq(b)->core != rq->core); in cfs_prio_less()
11954 se_fi_update(sea, rq->core->core_forceidle_seq, in_fi); in cfs_prio_less()
11955 se_fi_update(seb, rq->core->core_forceidle_seq, in_fi); in cfs_prio_less()
11975 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {} in task_tick_core() argument
11986 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) in task_tick_fair() argument
11997 task_tick_numa(rq, curr); in task_tick_fair()
11999 update_misfit_status(curr, rq); in task_tick_fair()
12002 task_tick_core(rq, curr); in task_tick_fair()
12014 struct rq *rq = this_rq(); in task_fork_fair() local
12017 rq_lock(rq, &rf); in task_fork_fair()
12018 update_rq_clock(rq); in task_fork_fair()
12034 resched_curr(rq); in task_fork_fair()
12038 rq_unlock(rq, &rf); in task_fork_fair()
12046 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_fair() argument
12051 if (rq->cfs.nr_running == 1) in prio_changed_fair()
12059 if (task_current(rq, p)) { in prio_changed_fair()
12061 resched_curr(rq); in prio_changed_fair()
12063 check_preempt_curr(rq, p, 0); in prio_changed_fair()
12189 static void switched_from_fair(struct rq *rq, struct task_struct *p) in switched_from_fair() argument
12194 static void switched_to_fair(struct rq *rq, struct task_struct *p) in switched_to_fair() argument
12204 if (task_current(rq, p)) in switched_to_fair()
12205 resched_curr(rq); in switched_to_fair()
12207 check_preempt_curr(rq, p, 0); in switched_to_fair()
12216 static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) in set_next_task_fair() argument
12226 list_move(&se->group_node, &rq->cfs_tasks); in set_next_task_fair()
12328 struct rq *rq; in online_fair_sched_group() local
12332 rq = cpu_rq(i); in online_fair_sched_group()
12334 rq_lock_irq(rq, &rf); in online_fair_sched_group()
12335 update_rq_clock(rq); in online_fair_sched_group()
12338 rq_unlock_irq(rq, &rf); in online_fair_sched_group()
12345 struct rq *rq; in unregister_fair_sched_group() local
12361 rq = cpu_rq(cpu); in unregister_fair_sched_group()
12363 raw_spin_rq_lock_irqsave(rq, flags); in unregister_fair_sched_group()
12365 raw_spin_rq_unlock_irqrestore(rq, flags); in unregister_fair_sched_group()
12373 struct rq *rq = cpu_rq(cpu); in init_tg_cfs_entry() local
12376 cfs_rq->rq = rq; in init_tg_cfs_entry()
12387 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
12421 struct rq *rq = cpu_rq(i); in __sched_group_set_shares() local
12426 rq_lock_irqsave(rq, &rf); in __sched_group_set_shares()
12427 update_rq_clock(rq); in __sched_group_set_shares()
12432 rq_unlock_irqrestore(rq, &rf); in __sched_group_set_shares()
12472 struct rq *rq = cpu_rq(i); in sched_group_set_idle() local
12479 rq_lock_irqsave(rq, &rf); in sched_group_set_idle()
12512 rq_unlock_irqrestore(rq, &rf); in sched_group_set_idle()
12541 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) in get_rr_interval_fair() argument
12550 if (rq->cfs.load.weight) in get_rr_interval_fair()