Lines Matching refs:rq

294 struct rq *rq = rq_of(cfs_rq); in list_add_leaf_cfs_rq() local
295 int cpu = cpu_of(rq); in list_add_leaf_cfs_rq()
298 return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
326 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
336 &rq->leaf_cfs_rq_list); in list_add_leaf_cfs_rq()
341 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
351 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch); in list_add_leaf_cfs_rq()
356 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
363 struct rq *rq = rq_of(cfs_rq); in list_del_leaf_cfs_rq() local
372 if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list) in list_del_leaf_cfs_rq()
373 rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev; in list_del_leaf_cfs_rq()
380 static inline void assert_list_leaf_cfs_rq(struct rq *rq) in assert_list_leaf_cfs_rq() argument
382 SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list); in assert_list_leaf_cfs_rq()
386 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ argument
387 list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
455 struct rq *rq = task_rq(p); in cfs_rq_of() local
457 return &rq->cfs; in cfs_rq_of()
481 static inline void assert_list_leaf_cfs_rq(struct rq *rq) in assert_list_leaf_cfs_rq() argument
485 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ argument
486 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
869 static void update_curr_fair(struct rq *rq) in update_curr_fair() argument
871 update_curr(cfs_rq_of(&rq->curr->se)); in update_curr_fair()
1192 static void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue() argument
1194 rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE); in account_numa_enqueue()
1195 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p)); in account_numa_enqueue()
1198 static void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue() argument
1200 rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE); in account_numa_dequeue()
1201 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); in account_numa_dequeue()
1479 static unsigned long cpu_runnable_load(struct rq *rq);
1498 struct rq *rq = cpu_rq(cpu); in update_numa_stats() local
1500 ns->load += cpu_runnable_load(rq); in update_numa_stats()
1525 struct rq *rq = cpu_rq(env->dst_cpu); in task_numa_assign() local
1528 if (xchg(&rq->numa_migrate_on, 1)) in task_numa_assign()
1536 rq = cpu_rq(env->best_cpu); in task_numa_assign()
1537 WRITE_ONCE(rq->numa_migrate_on, 0); in task_numa_assign()
1595 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
1757 struct rq *best_rq; in task_numa_migrate()
2675 static void task_tick_numa(struct rq *rq, struct task_struct *curr) in task_tick_numa() argument
2740 static void task_tick_numa(struct rq *rq, struct task_struct *curr) in task_tick_numa() argument
2744 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue() argument
2748 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue() argument
2764 struct rq *rq = rq_of(cfs_rq); in account_entity_enqueue() local
2766 account_numa_enqueue(rq, task_of(se)); in account_entity_enqueue()
2767 list_add(&se->group_node, &rq->cfs_tasks); in account_entity_enqueue()
3113 struct rq *rq = rq_of(cfs_rq); in cfs_rq_util_change() local
3115 if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) { in cfs_rq_util_change()
3130 cpufreq_update_util(rq, flags); in cfs_rq_util_change()
3840 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) in update_misfit_status() argument
3846 rq->misfit_task_load = 0; in update_misfit_status()
3850 if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) { in update_misfit_status()
3851 rq->misfit_task_load = 0; in update_misfit_status()
3859 rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1); in update_misfit_status()
3880 static inline int idle_balance(struct rq *rq, struct rq_flags *rf) in idle_balance() argument
3891 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {} in update_misfit_status() argument
4558 struct rq *rq = data; in tg_unthrottle_up() local
4559 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up()
4563 cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) - in tg_unthrottle_up()
4576 struct rq *rq = data; in tg_throttle_down() local
4577 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down()
4581 cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq); in tg_throttle_down()
4591 struct rq *rq = rq_of(cfs_rq); in throttle_cfs_rq() local
4621 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); in throttle_cfs_rq()
4642 sub_nr_running(rq, task_delta); in throttle_cfs_rq()
4649 cfs_rq->throttled_clock = rq_clock(rq); in throttle_cfs_rq()
4655 struct rq *rq = rq_of(cfs_rq); in unthrottle_cfs_rq() local
4660 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
4664 update_rq_clock(rq); in unthrottle_cfs_rq()
4667 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; in unthrottle_cfs_rq()
4672 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); in unthrottle_cfs_rq()
4713 add_nr_running(rq, task_delta); in unthrottle_cfs_rq()
4728 assert_list_leaf_cfs_rq(rq); in unthrottle_cfs_rq()
4731 if (rq->curr == rq->idle && rq->cfs.nr_running) in unthrottle_cfs_rq()
4732 resched_curr(rq); in unthrottle_cfs_rq()
4744 struct rq *rq = rq_of(cfs_rq); in distribute_cfs_runtime() local
4747 rq_lock_irqsave(rq, &rf); in distribute_cfs_runtime()
4766 rq_unlock_irqrestore(rq, &rf); in distribute_cfs_runtime()
5150 static void __maybe_unused update_runtime_enabled(struct rq *rq) in update_runtime_enabled() argument
5154 lockdep_assert_held(&rq->lock); in update_runtime_enabled()
5159 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in update_runtime_enabled()
5169 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) in unthrottle_offline_cfs_rqs() argument
5173 lockdep_assert_held(&rq->lock); in unthrottle_offline_cfs_rqs()
5177 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in unthrottle_offline_cfs_rqs()
5239 static inline void update_runtime_enabled(struct rq *rq) {} in update_runtime_enabled() argument
5240 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} in unthrottle_offline_cfs_rqs() argument
5249 static void hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair() argument
5254 SCHED_WARN_ON(task_rq(p) != rq); in hrtick_start_fair()
5256 if (rq->cfs.h_nr_running > 1) { in hrtick_start_fair()
5262 if (rq->curr == p) in hrtick_start_fair()
5263 resched_curr(rq); in hrtick_start_fair()
5266 hrtick_start(rq, delta); in hrtick_start_fair()
5275 static void hrtick_update(struct rq *rq) in hrtick_update() argument
5277 struct task_struct *curr = rq->curr; in hrtick_update()
5279 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) in hrtick_update()
5283 hrtick_start_fair(rq, curr); in hrtick_update()
5287 hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair() argument
5291 static inline void hrtick_update(struct rq *rq) in hrtick_update() argument
5304 static inline void update_overutilized_status(struct rq *rq) in update_overutilized_status() argument
5306 if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) { in update_overutilized_status()
5307 WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED); in update_overutilized_status()
5308 trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED); in update_overutilized_status()
5312 static inline void update_overutilized_status(struct rq *rq) { } in update_overutilized_status() argument
5321 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_fair() argument
5334 util_est_enqueue(&rq->cfs, p); in enqueue_task_fair()
5342 cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT); in enqueue_task_fair()
5383 add_nr_running(rq, 1); in enqueue_task_fair()
5399 update_overutilized_status(rq); in enqueue_task_fair()
5418 assert_list_leaf_cfs_rq(rq); in enqueue_task_fair()
5420 hrtick_update(rq); in enqueue_task_fair()
5430 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_fair() argument
5480 sub_nr_running(rq, 1); in dequeue_task_fair()
5482 util_est_dequeue(&rq->cfs, p, task_sleep); in dequeue_task_fair()
5483 hrtick_update(rq); in dequeue_task_fair()
5507 struct rq *rq = cpu_rq(cpu); in sched_idle_cpu() local
5509 return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running && in sched_idle_cpu()
5510 rq->nr_running); in sched_idle_cpu()
5513 static unsigned long cpu_runnable_load(struct rq *rq) in cpu_runnable_load() argument
5515 return cfs_rq_runnable_load_avg(&rq->cfs); in cpu_runnable_load()
5525 struct rq *rq = cpu_rq(cpu); in cpu_avg_load_per_task() local
5526 unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running); in cpu_avg_load_per_task()
5527 unsigned long load_avg = cpu_runnable_load(rq); in cpu_avg_load_per_task()
5845 struct rq *rq = cpu_rq(i); in find_idlest_group_cpu() local
5846 struct cpuidle_state *idle = idle_get_state(rq); in find_idlest_group_cpu()
5854 latest_idle_timestamp = rq->idle_stamp; in find_idlest_group_cpu()
5857 rq->idle_stamp > latest_idle_timestamp) { in find_idlest_group_cpu()
5863 latest_idle_timestamp = rq->idle_stamp; in find_idlest_group_cpu()
5971 void __update_idle_core(struct rq *rq) in __update_idle_core() argument
5973 int core = cpu_of(rq); in __update_idle_core()
6824 balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_fair() argument
6826 if (rq->nr_running) in balance_fair()
6829 return newidle_balance(rq, rf) != 0; in balance_fair()
6915 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) in check_preempt_wakeup() argument
6917 struct task_struct *curr = rq->curr; in check_preempt_wakeup()
6981 resched_curr(rq); in check_preempt_wakeup()
6991 if (unlikely(!se->on_rq || curr == rq->idle)) in check_preempt_wakeup()
6999 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in pick_next_task_fair() argument
7001 struct cfs_rq *cfs_rq = &rq->cfs; in pick_next_task_fair()
7007 if (!sched_fair_runnable(rq)) in pick_next_task_fair()
7044 cfs_rq = &rq->cfs; in pick_next_task_fair()
7089 put_prev_task(rq, prev); in pick_next_task_fair()
7106 list_move(&p->se.group_node, &rq->cfs_tasks); in pick_next_task_fair()
7109 if (hrtick_enabled(rq)) in pick_next_task_fair()
7110 hrtick_start_fair(rq, p); in pick_next_task_fair()
7112 update_misfit_status(p, rq); in pick_next_task_fair()
7120 new_tasks = newidle_balance(rq, rf); in pick_next_task_fair()
7137 update_idle_rq_clock_pelt(rq); in pick_next_task_fair()
7145 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) in put_prev_task_fair() argument
7161 static void yield_task_fair(struct rq *rq) in yield_task_fair() argument
7163 struct task_struct *curr = rq->curr; in yield_task_fair()
7170 if (unlikely(rq->nr_running == 1)) in yield_task_fair()
7176 update_rq_clock(rq); in yield_task_fair()
7186 rq_clock_skip_update(rq); in yield_task_fair()
7192 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt) in yield_to_task_fair() argument
7203 yield_task_fair(rq); in yield_to_task_fair()
7348 struct rq *src_rq;
7352 struct rq *dst_rq;
7700 static void attach_task(struct rq *rq, struct task_struct *p) in attach_task() argument
7702 lockdep_assert_held(&rq->lock); in attach_task()
7704 BUG_ON(task_rq(p) != rq); in attach_task()
7705 activate_task(rq, p, ENQUEUE_NOCLOCK); in attach_task()
7706 check_preempt_curr(rq, p, 0); in attach_task()
7713 static void attach_one_task(struct rq *rq, struct task_struct *p) in attach_one_task() argument
7717 rq_lock(rq, &rf); in attach_one_task()
7718 update_rq_clock(rq); in attach_one_task()
7719 attach_task(rq, p); in attach_one_task()
7720 rq_unlock(rq, &rf); in attach_one_task()
7758 static inline bool others_have_blocked(struct rq *rq) in others_have_blocked() argument
7760 if (READ_ONCE(rq->avg_rt.util_avg)) in others_have_blocked()
7763 if (READ_ONCE(rq->avg_dl.util_avg)) in others_have_blocked()
7767 if (READ_ONCE(rq->avg_irq.util_avg)) in others_have_blocked()
7774 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) in update_blocked_load_status() argument
7776 rq->last_blocked_load_update_tick = jiffies; in update_blocked_load_status()
7779 rq->has_blocked_load = 0; in update_blocked_load_status()
7783 static inline bool others_have_blocked(struct rq *rq) { return false; } in others_have_blocked() argument
7784 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {} in update_blocked_load_status() argument
7787 static bool __update_blocked_others(struct rq *rq, bool *done) in __update_blocked_others() argument
7790 u64 now = rq_clock_pelt(rq); in __update_blocked_others()
7797 curr_class = rq->curr->sched_class; in __update_blocked_others()
7799 decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) | in __update_blocked_others()
7800 update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) | in __update_blocked_others()
7801 update_irq_load_avg(rq, 0); in __update_blocked_others()
7803 if (others_have_blocked(rq)) in __update_blocked_others()
7828 static bool __update_blocked_fair(struct rq *rq, bool *done) in __update_blocked_fair() argument
7832 int cpu = cpu_of(rq); in __update_blocked_fair()
7838 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { in __update_blocked_fair()
7844 if (cfs_rq == &rq->cfs) in __update_blocked_fair()
7875 struct rq *rq = rq_of(cfs_rq); in update_cfs_rq_h_load() local
7876 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load()
7915 static bool __update_blocked_fair(struct rq *rq, bool *done) in __update_blocked_fair() argument
7917 struct cfs_rq *cfs_rq = &rq->cfs; in __update_blocked_fair()
7936 struct rq *rq = cpu_rq(cpu); in update_blocked_averages() local
7939 rq_lock_irqsave(rq, &rf); in update_blocked_averages()
7940 update_rq_clock(rq); in update_blocked_averages()
7942 decayed |= __update_blocked_others(rq, &done); in update_blocked_averages()
7943 decayed |= __update_blocked_fair(rq, &done); in update_blocked_averages()
7945 update_blocked_load_status(rq, !done); in update_blocked_averages()
7947 cpufreq_update_util(rq, 0); in update_blocked_averages()
7948 rq_unlock_irqrestore(rq, &rf); in update_blocked_averages()
8014 struct rq *rq = cpu_rq(cpu); in scale_rt_capacity() local
8018 irq = cpu_util_irq(rq); in scale_rt_capacity()
8023 used = READ_ONCE(rq->avg_rt.util_avg); in scale_rt_capacity()
8024 used += READ_ONCE(rq->avg_dl.util_avg); in scale_rt_capacity()
8113 struct rq *rq = cpu_rq(cpu); in update_group_capacity() local
8126 if (unlikely(!rq->sd)) { in update_group_capacity()
8129 sgc = rq->sd->groups->sgc; in update_group_capacity()
8164 check_cpu_capacity(struct rq *rq, struct sched_domain *sd) in check_cpu_capacity() argument
8166 return ((rq->cpu_capacity * sd->imbalance_pct) < in check_cpu_capacity()
8167 (rq->cpu_capacity_orig * 100)); in check_cpu_capacity()
8175 static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd) in check_misfit_status() argument
8177 return rq->misfit_task_load && in check_misfit_status()
8178 (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity.val || in check_misfit_status()
8179 check_cpu_capacity(rq, sd)); in check_misfit_status()
8298 static bool update_nohz_stats(struct rq *rq, bool force) in update_nohz_stats() argument
8301 unsigned int cpu = rq->cpu; in update_nohz_stats()
8303 if (!rq->has_blocked_load) in update_nohz_stats()
8309 if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick)) in update_nohz_stats()
8314 return rq->has_blocked_load; in update_nohz_stats()
8337 struct rq *rq = cpu_rq(i); in update_sg_lb_stats() local
8339 if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false)) in update_sg_lb_stats()
8342 sgs->group_load += cpu_runnable_load(rq); in update_sg_lb_stats()
8344 sgs->sum_nr_running += rq->cfs.h_nr_running; in update_sg_lb_stats()
8346 nr_running = rq->nr_running; in update_sg_lb_stats()
8354 sgs->nr_numa_running += rq->nr_numa_running; in update_sg_lb_stats()
8355 sgs->nr_preferred_running += rq->nr_preferred_running; in update_sg_lb_stats()
8364 sgs->group_misfit_task_load < rq->misfit_task_load) { in update_sg_lb_stats()
8365 sgs->group_misfit_task_load = rq->misfit_task_load; in update_sg_lb_stats()
8480 static inline enum fbq_type fbq_classify_rq(struct rq *rq) in fbq_classify_rq() argument
8482 if (rq->nr_running > rq->nr_numa_running) in fbq_classify_rq()
8484 if (rq->nr_running > rq->nr_preferred_running) in fbq_classify_rq()
8494 static inline enum fbq_type fbq_classify_rq(struct rq *rq) in fbq_classify_rq() argument
8942 static struct rq *find_busiest_queue(struct lb_env *env, in find_busiest_queue()
8945 struct rq *busiest = NULL, *rq; in find_busiest_queue() local
8953 rq = cpu_rq(i); in find_busiest_queue()
8954 rt = fbq_classify_rq(rq); in find_busiest_queue()
8983 if (rq->misfit_task_load > busiest_load) { in find_busiest_queue()
8984 busiest_load = rq->misfit_task_load; in find_busiest_queue()
8985 busiest = rq; in find_busiest_queue()
9001 rq->nr_running == 1) in find_busiest_queue()
9004 load = cpu_runnable_load(rq); in find_busiest_queue()
9011 if (rq->nr_running == 1 && load > env->imbalance && in find_busiest_queue()
9012 !check_cpu_capacity(rq, env->sd)) in find_busiest_queue()
9029 busiest = rq; in find_busiest_queue()
9138 static int load_balance(int this_cpu, struct rq *this_rq, in load_balance()
9145 struct rq *busiest; in load_balance()
9449 struct rq *busiest_rq = data; in active_load_balance_cpu_stop()
9452 struct rq *target_rq = cpu_rq(target_cpu); in active_load_balance_cpu_stop()
9549 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) in rebalance_domains() argument
9552 int cpu = rq->cpu; in rebalance_domains()
9598 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { in rebalance_domains()
9622 rq->max_idle_balance_cost = in rebalance_domains()
9633 rq->next_balance = next_balance; in rebalance_domains()
9644 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance)) in rebalance_domains()
9645 nohz.next_balance = rq->next_balance; in rebalance_domains()
9650 static inline int on_null_domain(struct rq *rq) in on_null_domain() argument
9652 return unlikely(!rcu_dereference_sched(rq->sd)); in on_null_domain()
9715 static void nohz_balancer_kick(struct rq *rq) in nohz_balancer_kick() argument
9720 int nr_busy, i, cpu = rq->cpu; in nohz_balancer_kick()
9723 if (unlikely(rq->idle_balance)) in nohz_balancer_kick()
9730 nohz_balance_exit_idle(rq); in nohz_balancer_kick()
9746 if (rq->nr_running >= 2) { in nohz_balancer_kick()
9753 sd = rcu_dereference(rq->sd); in nohz_balancer_kick()
9760 if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) { in nohz_balancer_kick()
9787 if (check_misfit_status(rq, sd)) { in nohz_balancer_kick()
9842 void nohz_balance_exit_idle(struct rq *rq) in nohz_balance_exit_idle() argument
9844 SCHED_WARN_ON(rq != this_rq()); in nohz_balance_exit_idle()
9846 if (likely(!rq->nohz_tick_stopped)) in nohz_balance_exit_idle()
9849 rq->nohz_tick_stopped = 0; in nohz_balance_exit_idle()
9850 cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask); in nohz_balance_exit_idle()
9853 set_cpu_sd_state_busy(rq->cpu); in nohz_balance_exit_idle()
9878 struct rq *rq = cpu_rq(cpu); in nohz_balance_enter_idle() local
9895 rq->has_blocked_load = 1; in nohz_balance_enter_idle()
9903 if (rq->nohz_tick_stopped) in nohz_balance_enter_idle()
9907 if (on_null_domain(rq)) in nohz_balance_enter_idle()
9910 rq->nohz_tick_stopped = 1; in nohz_balance_enter_idle()
9939 static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, in _nohz_idle_balance()
9950 struct rq *rq; in _nohz_idle_balance() local
9984 rq = cpu_rq(balance_cpu); in _nohz_idle_balance()
9986 has_blocked_load |= update_nohz_stats(rq, true); in _nohz_idle_balance()
9992 if (time_after_eq(jiffies, rq->next_balance)) { in _nohz_idle_balance()
9995 rq_lock_irqsave(rq, &rf); in _nohz_idle_balance()
9996 update_rq_clock(rq); in _nohz_idle_balance()
9997 rq_unlock_irqrestore(rq, &rf); in _nohz_idle_balance()
10000 rebalance_domains(rq, CPU_IDLE); in _nohz_idle_balance()
10003 if (time_after(next_balance, rq->next_balance)) { in _nohz_idle_balance()
10004 next_balance = rq->next_balance; in _nohz_idle_balance()
10044 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) in nohz_idle_balance()
10067 static void nohz_newidle_balance(struct rq *this_rq) in nohz_newidle_balance()
10100 static inline void nohz_balancer_kick(struct rq *rq) { } in nohz_balancer_kick() argument
10102 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) in nohz_idle_balance()
10107 static inline void nohz_newidle_balance(struct rq *this_rq) { } in nohz_newidle_balance()
10114 int newidle_balance(struct rq *this_rq, struct rq_flags *rf) in newidle_balance()
10234 struct rq *this_rq = this_rq(); in run_rebalance_domains()
10257 void trigger_load_balance(struct rq *rq) in trigger_load_balance() argument
10260 if (unlikely(on_null_domain(rq))) in trigger_load_balance()
10263 if (time_after_eq(jiffies, rq->next_balance)) in trigger_load_balance()
10266 nohz_balancer_kick(rq); in trigger_load_balance()
10269 static void rq_online_fair(struct rq *rq) in rq_online_fair() argument
10273 update_runtime_enabled(rq); in rq_online_fair()
10276 static void rq_offline_fair(struct rq *rq) in rq_offline_fair() argument
10281 unthrottle_offline_cfs_rqs(rq); in rq_offline_fair()
10294 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) in task_tick_fair() argument
10305 task_tick_numa(rq, curr); in task_tick_fair()
10307 update_misfit_status(curr, rq); in task_tick_fair()
10320 struct rq *rq = this_rq(); in task_fork_fair() local
10323 rq_lock(rq, &rf); in task_fork_fair()
10324 update_rq_clock(rq); in task_fork_fair()
10340 resched_curr(rq); in task_fork_fair()
10344 rq_unlock(rq, &rf); in task_fork_fair()
10352 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_fair() argument
10362 if (rq->curr == p) { in prio_changed_fair()
10364 resched_curr(rq); in prio_changed_fair()
10366 check_preempt_curr(rq, p, 0); in prio_changed_fair()
10486 static void switched_from_fair(struct rq *rq, struct task_struct *p) in switched_from_fair() argument
10491 static void switched_to_fair(struct rq *rq, struct task_struct *p) in switched_to_fair() argument
10501 if (rq->curr == p) in switched_to_fair()
10502 resched_curr(rq); in switched_to_fair()
10504 check_preempt_curr(rq, p, 0); in switched_to_fair()
10513 static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) in set_next_task_fair() argument
10523 list_move(&se->group_node, &rq->cfs_tasks); in set_next_task_fair()
10644 struct rq *rq; in online_fair_sched_group() local
10648 rq = cpu_rq(i); in online_fair_sched_group()
10650 rq_lock_irq(rq, &rf); in online_fair_sched_group()
10651 update_rq_clock(rq); in online_fair_sched_group()
10654 rq_unlock_irq(rq, &rf); in online_fair_sched_group()
10661 struct rq *rq; in unregister_fair_sched_group() local
10675 rq = cpu_rq(cpu); in unregister_fair_sched_group()
10677 raw_spin_lock_irqsave(&rq->lock, flags); in unregister_fair_sched_group()
10679 raw_spin_unlock_irqrestore(&rq->lock, flags); in unregister_fair_sched_group()
10687 struct rq *rq = cpu_rq(cpu); in init_tg_cfs_entry() local
10690 cfs_rq->rq = rq; in init_tg_cfs_entry()
10701 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
10734 struct rq *rq = cpu_rq(i); in sched_group_set_shares() local
10739 rq_lock_irqsave(rq, &rf); in sched_group_set_shares()
10740 update_rq_clock(rq); in sched_group_set_shares()
10745 rq_unlock_irqrestore(rq, &rf); in sched_group_set_shares()
10768 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) in get_rr_interval_fair() argument
10777 if (rq->cfs.load.weight) in get_rr_interval_fair()
10915 const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq) in sched_trace_rq_avg_rt() argument
10918 return rq ? &rq->avg_rt : NULL; in sched_trace_rq_avg_rt()
10925 const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq) in sched_trace_rq_avg_dl() argument
10928 return rq ? &rq->avg_dl : NULL; in sched_trace_rq_avg_dl()
10935 const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq) in sched_trace_rq_avg_irq() argument
10938 return rq ? &rq->avg_irq : NULL; in sched_trace_rq_avg_irq()
10945 int sched_trace_rq_cpu(struct rq *rq) in sched_trace_rq_cpu() argument
10947 return rq ? cpu_of(rq) : -1; in sched_trace_rq_cpu()