
Lines Matching refs:cfs_rq

355 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq() argument
357 struct rq *rq = rq_of(cfs_rq); in list_add_leaf_cfs_rq()
360 if (cfs_rq->on_list) in list_add_leaf_cfs_rq()
363 cfs_rq->on_list = 1; in list_add_leaf_cfs_rq()
374 if (cfs_rq->tg->parent && in list_add_leaf_cfs_rq()
375 cfs_rq->tg->parent->cfs_rq[cpu]->on_list) { in list_add_leaf_cfs_rq()
382 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, in list_add_leaf_cfs_rq()
383 &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list)); in list_add_leaf_cfs_rq()
393 if (!cfs_rq->tg->parent) { in list_add_leaf_cfs_rq()
398 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, in list_add_leaf_cfs_rq()
414 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch); in list_add_leaf_cfs_rq()
419 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
423 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_del_leaf_cfs_rq() argument
425 if (cfs_rq->on_list) { in list_del_leaf_cfs_rq()
426 struct rq *rq = rq_of(cfs_rq); in list_del_leaf_cfs_rq()
435 if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list) in list_del_leaf_cfs_rq()
436 rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev; in list_del_leaf_cfs_rq()
438 list_del_rcu(&cfs_rq->leaf_cfs_rq_list); in list_del_leaf_cfs_rq()
439 cfs_rq->on_list = 0; in list_del_leaf_cfs_rq()
449 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ argument
450 list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
454 static inline struct cfs_rq *
457 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
458 return se->cfs_rq; in is_same_group()
505 static int cfs_rq_is_idle(struct cfs_rq *cfs_rq) in cfs_rq_is_idle() argument
507 return cfs_rq->idle > 0; in cfs_rq_is_idle()
522 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq() argument
527 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_del_leaf_cfs_rq() argument
535 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ argument
536 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
553 static int cfs_rq_is_idle(struct cfs_rq *cfs_rq) in cfs_rq_is_idle() argument
566 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
599 static void update_min_vruntime(struct cfs_rq *cfs_rq) in update_min_vruntime() argument
601 struct sched_entity *curr = cfs_rq->curr; in update_min_vruntime()
602 struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline); in update_min_vruntime()
604 u64 vruntime = cfs_rq->min_vruntime; in update_min_vruntime()
623 u64_u32_store(cfs_rq->min_vruntime, in update_min_vruntime()
624 max_vruntime(cfs_rq->min_vruntime, vruntime)); in update_min_vruntime()
635 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
637 trace_android_rvh_enqueue_entity(cfs_rq, se); in __enqueue_entity()
638 rb_add_cached(&se->run_node, &cfs_rq->tasks_timeline, __entity_less); in __enqueue_entity()
641 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
643 trace_android_rvh_dequeue_entity(cfs_rq, se); in __dequeue_entity()
644 rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline); in __dequeue_entity()
647 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) in __pick_first_entity() argument
649 struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline); in __pick_first_entity()
668 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) in __pick_last_entity() argument
670 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root); in __pick_last_entity()
727 static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq);
735 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_slice() argument
737 unsigned int nr_running = cfs_rq->nr_running; in sched_slice()
743 nr_running = rq_of(cfs_rq)->cfs.h_nr_running; in sched_slice()
750 struct cfs_rq *qcfs_rq; in sched_slice()
765 if (se_is_idle(init_se) && !sched_idle_cfs_rq(cfs_rq)) in sched_slice()
781 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_vslice() argument
783 return calc_delta_fair(sched_slice(cfs_rq, se), se); in sched_vslice()
841 struct cfs_rq *cfs_rq = cfs_rq_of(se); in post_init_entity_util_avg() local
843 long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq))); in post_init_entity_util_avg()
844 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; in post_init_entity_util_avg()
857 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
862 if (cfs_rq->avg.util_avg != 0) { in post_init_entity_util_avg()
863 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; in post_init_entity_util_avg()
864 sa->util_avg /= (cfs_rq->avg.load_avg + 1); in post_init_entity_util_avg()
886 static void update_tg_load_avg(struct cfs_rq *cfs_rq) in update_tg_load_avg() argument
894 static void update_curr(struct cfs_rq *cfs_rq) in update_curr() argument
896 struct sched_entity *curr = cfs_rq->curr; in update_curr()
897 u64 now = rq_clock_task(rq_of(cfs_rq)); in update_curr()
918 schedstat_add(cfs_rq->exec_clock, delta_exec); in update_curr()
921 update_min_vruntime(cfs_rq); in update_curr()
931 account_cfs_rq_runtime(cfs_rq, delta_exec); in update_curr()
940 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start_fair() argument
953 __update_stats_wait_start(rq_of(cfs_rq), p, stats); in update_stats_wait_start_fair()
957 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end_fair() argument
979 __update_stats_wait_end(rq_of(cfs_rq), p, stats); in update_stats_wait_end_fair()
983 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue_sleeper_fair() argument
996 __update_stats_enqueue_sleeper(rq_of(cfs_rq), tsk, stats); in update_stats_enqueue_sleeper_fair()
1003 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_enqueue_fair() argument
1012 if (se != cfs_rq->curr) in update_stats_enqueue_fair()
1013 update_stats_wait_start_fair(cfs_rq, se); in update_stats_enqueue_fair()
1016 update_stats_enqueue_sleeper_fair(cfs_rq, se); in update_stats_enqueue_fair()
1020 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_dequeue_fair() argument
1030 if (se != cfs_rq->curr) in update_stats_dequeue_fair()
1031 update_stats_wait_end_fair(cfs_rq, se); in update_stats_dequeue_fair()
1041 rq_clock(rq_of(cfs_rq))); in update_stats_dequeue_fair()
1044 rq_clock(rq_of(cfs_rq))); in update_stats_dequeue_fair()
1052 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
1057 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
3215 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
3217 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
3220 struct rq *rq = rq_of(cfs_rq); in account_entity_enqueue()
3226 cfs_rq->nr_running++; in account_entity_enqueue()
3228 cfs_rq->idle_nr_running++; in account_entity_enqueue()
3232 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
3234 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
3237 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
3241 cfs_rq->nr_running--; in account_entity_dequeue()
3243 cfs_rq->idle_nr_running--; in account_entity_dequeue()
3296 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_load_avg() argument
3298 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
3299 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
3303 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_load_avg() argument
3305 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
3306 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
3308 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, in dequeue_load_avg()
3309 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); in dequeue_load_avg()
3313 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_load_avg() argument
3315 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_load_avg() argument
3318 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
3323 if (cfs_rq->curr == se) in reweight_entity()
3324 update_curr(cfs_rq); in reweight_entity()
3325 update_load_sub(&cfs_rq->load, se->load.weight); in reweight_entity()
3327 dequeue_load_avg(cfs_rq, se); in reweight_entity()
3339 enqueue_load_avg(cfs_rq, se); in reweight_entity()
3341 update_load_add(&cfs_rq->load, se->load.weight); in reweight_entity()
3348 struct cfs_rq *cfs_rq = cfs_rq_of(se); in reweight_task() local
3352 reweight_entity(cfs_rq, se, weight); in reweight_task()
3357 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
3434 static long calc_group_shares(struct cfs_rq *cfs_rq) in calc_group_shares() argument
3437 struct task_group *tg = cfs_rq->tg; in calc_group_shares()
3441 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); in calc_group_shares()
3446 tg_weight -= cfs_rq->tg_load_avg_contrib; in calc_group_shares()
3475 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in update_cfs_group()
3502 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) in cfs_rq_util_change() argument
3504 struct rq *rq = rq_of(cfs_rq); in cfs_rq_util_change()
3506 if (&rq->cfs == cfs_rq) { in cfs_rq_util_change()
3549 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) in cfs_rq_last_update_time() argument
3551 return u64_u32_load_copy(cfs_rq->avg.last_update_time, in cfs_rq_last_update_time()
3552 cfs_rq->last_update_time_copy); in cfs_rq_last_update_time()
3563 static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq) in child_cfs_rq_on_list() argument
3565 struct cfs_rq *prev_cfs_rq; in child_cfs_rq_on_list()
3568 if (cfs_rq->on_list) { in child_cfs_rq_on_list()
3569 prev = cfs_rq->leaf_cfs_rq_list.prev; in child_cfs_rq_on_list()
3571 struct rq *rq = rq_of(cfs_rq); in child_cfs_rq_on_list()
3576 prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list); in child_cfs_rq_on_list()
3578 return (prev_cfs_rq->tg->parent == cfs_rq->tg); in child_cfs_rq_on_list()
3581 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) in cfs_rq_is_decayed() argument
3583 if (cfs_rq->load.weight) in cfs_rq_is_decayed()
3586 if (!load_avg_is_decayed(&cfs_rq->avg)) in cfs_rq_is_decayed()
3589 if (child_cfs_rq_on_list(cfs_rq)) in cfs_rq_is_decayed()
3609 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) in update_tg_load_avg() argument
3611 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; in update_tg_load_avg()
3616 if (cfs_rq->tg == &root_task_group) in update_tg_load_avg()
3619 if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { in update_tg_load_avg()
3620 atomic_long_add(delta, &cfs_rq->tg->load_avg); in update_tg_load_avg()
3621 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; in update_tg_load_avg()
3631 struct cfs_rq *prev, struct cfs_rq *next) in set_task_rq_fair()
3724 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_util() argument
3737 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_util()
3747 add_positive(&cfs_rq->avg.util_avg, delta_avg); in update_tg_cfs_util()
3748 add_positive(&cfs_rq->avg.util_sum, delta_sum); in update_tg_cfs_util()
3751 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, in update_tg_cfs_util()
3752 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); in update_tg_cfs_util()
3756 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_runnable() argument
3769 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_runnable()
3778 add_positive(&cfs_rq->avg.runnable_avg, delta_avg); in update_tg_cfs_runnable()
3779 add_positive(&cfs_rq->avg.runnable_sum, delta_sum); in update_tg_cfs_runnable()
3781 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, in update_tg_cfs_runnable()
3782 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); in update_tg_cfs_runnable()
3786 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_load() argument
3803 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_load()
3846 add_positive(&cfs_rq->avg.load_avg, delta_avg); in update_tg_cfs_load()
3847 add_positive(&cfs_rq->avg.load_sum, delta_sum); in update_tg_cfs_load()
3849 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, in update_tg_cfs_load()
3850 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); in update_tg_cfs_load()
3853 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) in add_tg_cfs_propagate() argument
3855 cfs_rq->propagate = 1; in add_tg_cfs_propagate()
3856 cfs_rq->prop_runnable_sum += runnable_sum; in add_tg_cfs_propagate()
3862 struct cfs_rq *cfs_rq, *gcfs_rq; in propagate_entity_load_avg() local
3873 cfs_rq = cfs_rq_of(se); in propagate_entity_load_avg()
3875 add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum); in propagate_entity_load_avg()
3877 update_tg_cfs_util(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3878 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3879 update_tg_cfs_load(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3881 trace_pelt_cfs_tp(cfs_rq); in propagate_entity_load_avg()
3893 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in skip_blocked_update()
3919 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {} in update_tg_load_avg() argument
3926 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {} in add_tg_cfs_propagate() argument
3934 struct cfs_rq *cfs_rq; in migrate_se_pelt_lag() local
3941 cfs_rq = cfs_rq_of(se); in migrate_se_pelt_lag()
3942 rq = rq_of(cfs_rq); in migrate_se_pelt_lag()
3982 throttled = u64_u32_load(cfs_rq->throttled_pelt_idle); in migrate_se_pelt_lag()
3995 lut = cfs_rq_last_update_time(cfs_rq); in migrate_se_pelt_lag()
4029 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) in update_cfs_rq_load_avg() argument
4032 struct sched_avg *sa = &cfs_rq->avg; in update_cfs_rq_load_avg()
4035 if (cfs_rq->removed.nr) { in update_cfs_rq_load_avg()
4037 u32 divider = get_pelt_divider(&cfs_rq->avg); in update_cfs_rq_load_avg()
4039 raw_spin_lock(&cfs_rq->removed.lock); in update_cfs_rq_load_avg()
4040 swap(cfs_rq->removed.util_avg, removed_util); in update_cfs_rq_load_avg()
4041 swap(cfs_rq->removed.load_avg, removed_load); in update_cfs_rq_load_avg()
4042 swap(cfs_rq->removed.runnable_avg, removed_runnable); in update_cfs_rq_load_avg()
4043 cfs_rq->removed.nr = 0; in update_cfs_rq_load_avg()
4044 raw_spin_unlock(&cfs_rq->removed.lock); in update_cfs_rq_load_avg()
4079 add_tg_cfs_propagate(cfs_rq, in update_cfs_rq_load_avg()
4085 decayed |= __update_load_avg_cfs_rq(now, cfs_rq); in update_cfs_rq_load_avg()
4087 cfs_rq->last_update_time_copy, in update_cfs_rq_load_avg()
4100 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in attach_entity_load_avg() argument
4106 u32 divider = get_pelt_divider(&cfs_rq->avg); in attach_entity_load_avg()
4115 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
4116 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
4134 trace_android_rvh_attach_entity_load_avg(cfs_rq, se); in attach_entity_load_avg()
4136 enqueue_load_avg(cfs_rq, se); in attach_entity_load_avg()
4137 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
4138 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
4139 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; in attach_entity_load_avg()
4140 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; in attach_entity_load_avg()
4142 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
4144 cfs_rq_util_change(cfs_rq, 0); in attach_entity_load_avg()
4146 trace_pelt_cfs_tp(cfs_rq); in attach_entity_load_avg()
4157 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg() argument
4159 trace_android_rvh_detach_entity_load_avg(cfs_rq, se); in detach_entity_load_avg()
4161 dequeue_load_avg(cfs_rq, se); in detach_entity_load_avg()
4162 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
4163 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
4165 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, in detach_entity_load_avg()
4166 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); in detach_entity_load_avg()
4168 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); in detach_entity_load_avg()
4169 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum); in detach_entity_load_avg()
4171 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, in detach_entity_load_avg()
4172 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); in detach_entity_load_avg()
4174 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
4176 cfs_rq_util_change(cfs_rq, 0); in detach_entity_load_avg()
4178 trace_pelt_cfs_tp(cfs_rq); in detach_entity_load_avg()
4190 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_load_avg() argument
4192 u64 now = cfs_rq_clock_pelt(cfs_rq); in update_load_avg()
4200 __update_load_avg_se(now, cfs_rq, se); in update_load_avg()
4202 decayed = update_cfs_rq_load_avg(now, cfs_rq); in update_load_avg()
4205 trace_android_rvh_update_load_avg(now, cfs_rq, se); in update_load_avg()
4216 attach_entity_load_avg(cfs_rq, se); in update_load_avg()
4217 update_tg_load_avg(cfs_rq); in update_load_avg()
4224 detach_entity_load_avg(cfs_rq, se); in update_load_avg()
4225 update_tg_load_avg(cfs_rq); in update_load_avg()
4227 cfs_rq_util_change(cfs_rq, 0); in update_load_avg()
4230 update_tg_load_avg(cfs_rq); in update_load_avg()
4240 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sync_entity_load_avg() local
4243 last_update_time = cfs_rq_last_update_time(cfs_rq); in sync_entity_load_avg()
4253 struct cfs_rq *cfs_rq = cfs_rq_of(se); in remove_entity_load_avg() local
4264 trace_android_rvh_remove_entity_load_avg(cfs_rq, se); in remove_entity_load_avg()
4266 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags); in remove_entity_load_avg()
4267 ++cfs_rq->removed.nr; in remove_entity_load_avg()
4268 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
4269 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
4270 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; in remove_entity_load_avg()
4271 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags); in remove_entity_load_avg()
4274 static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq) in cfs_rq_runnable_avg() argument
4276 return cfs_rq->avg.runnable_avg; in cfs_rq_runnable_avg()
4279 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) in cfs_rq_load_avg() argument
4281 return cfs_rq->avg.load_avg; in cfs_rq_load_avg()
4303 static inline void util_est_enqueue(struct cfs_rq *cfs_rq, in util_est_enqueue() argument
4312 enqueued = cfs_rq->avg.util_est.enqueued; in util_est_enqueue()
4314 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); in util_est_enqueue()
4316 trace_sched_util_est_cfs_tp(cfs_rq); in util_est_enqueue()
4319 static inline void util_est_dequeue(struct cfs_rq *cfs_rq, in util_est_dequeue() argument
4328 enqueued = cfs_rq->avg.util_est.enqueued; in util_est_dequeue()
4330 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); in util_est_dequeue()
4332 trace_sched_util_est_cfs_tp(cfs_rq); in util_est_dequeue()
4350 static inline void util_est_update(struct cfs_rq *cfs_rq, in util_est_update() argument
4358 trace_android_rvh_util_est_update(cfs_rq, p, task_sleep, &ret); in util_est_update()
4411 if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq)))) in util_est_update()
4605 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) in cfs_rq_is_decayed() argument
4607 return !cfs_rq->nr_running; in cfs_rq_is_decayed()
4615 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) in update_load_avg() argument
4617 cfs_rq_util_change(cfs_rq, 0); in update_load_avg()
4623 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in attach_entity_load_avg() argument
4625 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in detach_entity_load_avg() argument
4633 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} in util_est_enqueue() argument
4636 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {} in util_est_dequeue() argument
4639 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, in util_est_update() argument
4645 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) in check_spread() argument
4648 s64 d = se->vruntime - cfs_rq->min_vruntime; in check_spread()
4654 schedstat_inc(cfs_rq->nr_spread_over); in check_spread()
4660 struct cfs_rq *cfs_rq; in entity_is_long_sleeper() local
4666 cfs_rq = cfs_rq_of(se); in entity_is_long_sleeper()
4668 sleep_time = rq_clock_task(rq_of(cfs_rq)); in entity_is_long_sleeper()
4682 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) in place_entity() argument
4684 u64 vruntime = cfs_rq->min_vruntime; in place_entity()
4693 vruntime += sched_vslice(cfs_rq, se); in place_entity()
4714 trace_android_rvh_place_entity(cfs_rq, se, initial, &vruntime); in place_entity()
4740 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
4775 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
4778 bool curr = cfs_rq->curr == se; in enqueue_entity()
4785 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
4787 update_curr(cfs_rq); in enqueue_entity()
4796 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
4807 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); in enqueue_entity()
4810 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
4813 place_entity(cfs_rq, se, 0); in enqueue_entity()
4819 update_stats_enqueue_fair(cfs_rq, se, flags); in enqueue_entity()
4820 check_spread(cfs_rq, se); in enqueue_entity()
4822 __enqueue_entity(cfs_rq, se); in enqueue_entity()
4825 if (cfs_rq->nr_running == 1) { in enqueue_entity()
4826 check_enqueue_throttle(cfs_rq); in enqueue_entity()
4827 if (!throttled_hierarchy(cfs_rq)) in enqueue_entity()
4828 list_add_leaf_cfs_rq(cfs_rq); in enqueue_entity()
4835 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_last() local
4836 if (cfs_rq->last != se) in __clear_buddies_last()
4839 cfs_rq->last = NULL; in __clear_buddies_last()
4846 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next() local
4847 if (cfs_rq->next != se) in __clear_buddies_next()
4850 cfs_rq->next = NULL; in __clear_buddies_next()
4857 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_skip() local
4858 if (cfs_rq->skip != se) in __clear_buddies_skip()
4861 cfs_rq->skip = NULL; in __clear_buddies_skip()
4865 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
4867 if (cfs_rq->last == se) in clear_buddies()
4870 if (cfs_rq->next == se) in clear_buddies()
4873 if (cfs_rq->skip == se) in clear_buddies()
4877 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
4880 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
4890 update_curr(cfs_rq); in dequeue_entity()
4901 update_load_avg(cfs_rq, se, action); in dequeue_entity()
4904 update_stats_dequeue_fair(cfs_rq, se, flags); in dequeue_entity()
4906 clear_buddies(cfs_rq, se); in dequeue_entity()
4908 if (se != cfs_rq->curr) in dequeue_entity()
4909 __dequeue_entity(cfs_rq, se); in dequeue_entity()
4911 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
4920 se->vruntime -= cfs_rq->min_vruntime; in dequeue_entity()
4923 return_cfs_rq_runtime(cfs_rq); in dequeue_entity()
4934 update_min_vruntime(cfs_rq); in dequeue_entity()
4936 if (cfs_rq->nr_running == 0) in dequeue_entity()
4937 update_idle_cfs_rq_clock_pelt(cfs_rq); in dequeue_entity()
4944 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) in check_preempt_tick() argument
4956 ideal_runtime = min_t(u64, sched_slice(cfs_rq, curr), sysctl_sched_latency); in check_preempt_tick()
4960 delta_exec, cfs_rq, curr, sysctl_sched_min_granularity); in check_preempt_tick()
4964 resched_curr(rq_of(cfs_rq)); in check_preempt_tick()
4969 clear_buddies(cfs_rq, curr); in check_preempt_tick()
4981 se = __pick_first_entity(cfs_rq); in check_preempt_tick()
4988 resched_curr(rq_of(cfs_rq)); in check_preempt_tick()
4991 void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
4993 clear_buddies(cfs_rq, se); in set_next_entity()
5002 update_stats_wait_end_fair(cfs_rq, se); in set_next_entity()
5003 __dequeue_entity(cfs_rq, se); in set_next_entity()
5004 update_load_avg(cfs_rq, se, UPDATE_TG); in set_next_entity()
5007 update_stats_curr_start(cfs_rq, se); in set_next_entity()
5008 cfs_rq->curr = se; in set_next_entity()
5016 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { in set_next_entity()
5040 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) in pick_next_entity() argument
5042 struct sched_entity *left = __pick_first_entity(cfs_rq); in pick_next_entity()
5045 trace_android_rvh_pick_next_entity(cfs_rq, curr, &se); in pick_next_entity()
5062 if (cfs_rq->skip && cfs_rq->skip == se) { in pick_next_entity()
5066 second = __pick_first_entity(cfs_rq); in pick_next_entity()
5077 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) { in pick_next_entity()
5081 se = cfs_rq->next; in pick_next_entity()
5082 } else if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) { in pick_next_entity()
5086 se = cfs_rq->last; in pick_next_entity()
5093 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
5095 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) in put_prev_entity() argument
5102 update_curr(cfs_rq); in put_prev_entity()
5105 check_cfs_rq_runtime(cfs_rq); in put_prev_entity()
5107 check_spread(cfs_rq, prev); in put_prev_entity()
5110 update_stats_wait_start_fair(cfs_rq, prev); in put_prev_entity()
5112 __enqueue_entity(cfs_rq, prev); in put_prev_entity()
5114 update_load_avg(cfs_rq, prev, 0); in put_prev_entity()
5116 cfs_rq->curr = NULL; in put_prev_entity()
5120 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) in entity_tick() argument
5125 update_curr(cfs_rq); in entity_tick()
5130 update_load_avg(cfs_rq, curr, UPDATE_TG); in entity_tick()
5139 resched_curr(rq_of(cfs_rq)); in entity_tick()
5146 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) in entity_tick()
5150 if (cfs_rq->nr_running > 1) in entity_tick()
5151 check_preempt_tick(cfs_rq, curr); in entity_tick()
5152 trace_android_rvh_entity_tick(cfs_rq, curr); in entity_tick()
5235 struct cfs_rq *cfs_rq, u64 target_runtime) in __assign_cfs_rq_runtime() argument
5242 min_amount = target_runtime - cfs_rq->runtime_remaining; in __assign_cfs_rq_runtime()
5256 cfs_rq->runtime_remaining += amount; in __assign_cfs_rq_runtime()
5258 return cfs_rq->runtime_remaining > 0; in __assign_cfs_rq_runtime()
5262 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) in assign_cfs_rq_runtime() argument
5264 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in assign_cfs_rq_runtime()
5268 ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice()); in assign_cfs_rq_runtime()
5274 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) in __account_cfs_rq_runtime() argument
5277 cfs_rq->runtime_remaining -= delta_exec; in __account_cfs_rq_runtime()
5279 if (likely(cfs_rq->runtime_remaining > 0)) in __account_cfs_rq_runtime()
5282 if (cfs_rq->throttled) in __account_cfs_rq_runtime()
5288 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) in __account_cfs_rq_runtime()
5289 resched_curr(rq_of(cfs_rq)); in __account_cfs_rq_runtime()
5293 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) in account_cfs_rq_runtime() argument
5295 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) in account_cfs_rq_runtime()
5298 __account_cfs_rq_runtime(cfs_rq, delta_exec); in account_cfs_rq_runtime()
5301 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) in cfs_rq_throttled() argument
5303 return cfs_bandwidth_used() && cfs_rq->throttled; in cfs_rq_throttled()
5307 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) in throttled_hierarchy() argument
5309 return cfs_bandwidth_used() && cfs_rq->throttle_count; in throttled_hierarchy()
5320 struct cfs_rq *src_cfs_rq, *dest_cfs_rq; in throttled_lb_pair()
5322 src_cfs_rq = tg->cfs_rq[src_cpu]; in throttled_lb_pair()
5323 dest_cfs_rq = tg->cfs_rq[dest_cpu]; in throttled_lb_pair()
5332 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up() local
5334 cfs_rq->throttle_count--; in tg_unthrottle_up()
5335 if (!cfs_rq->throttle_count) { in tg_unthrottle_up()
5336 cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) - in tg_unthrottle_up()
5337 cfs_rq->throttled_clock_pelt; in tg_unthrottle_up()
5340 if (!cfs_rq_is_decayed(cfs_rq)) in tg_unthrottle_up()
5341 list_add_leaf_cfs_rq(cfs_rq); in tg_unthrottle_up()
5350 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down() local
5353 if (!cfs_rq->throttle_count) { in tg_throttle_down()
5354 cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq); in tg_throttle_down()
5355 list_del_leaf_cfs_rq(cfs_rq); in tg_throttle_down()
5357 cfs_rq->throttle_count++; in tg_throttle_down()
5362 static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) in throttle_cfs_rq() argument
5364 struct rq *rq = rq_of(cfs_rq); in throttle_cfs_rq()
5365 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in throttle_cfs_rq()
5371 if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) { in throttle_cfs_rq()
5382 list_add_tail_rcu(&cfs_rq->throttled_list, in throttle_cfs_rq()
5390 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
5394 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); in throttle_cfs_rq()
5397 task_delta = cfs_rq->h_nr_running; in throttle_cfs_rq()
5398 idle_task_delta = cfs_rq->idle_h_nr_running; in throttle_cfs_rq()
5400 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
5408 idle_task_delta = cfs_rq->h_nr_running; in throttle_cfs_rq()
5421 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
5430 idle_task_delta = cfs_rq->h_nr_running; in throttle_cfs_rq()
5444 cfs_rq->throttled = 1; in throttle_cfs_rq()
5445 cfs_rq->throttled_clock = rq_clock(rq); in throttle_cfs_rq()
5449 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) in unthrottle_cfs_rq() argument
5451 struct rq *rq = rq_of(cfs_rq); in unthrottle_cfs_rq()
5452 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in unthrottle_cfs_rq()
5456 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
5458 cfs_rq->throttled = 0; in unthrottle_cfs_rq()
5463 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; in unthrottle_cfs_rq()
5464 list_del_rcu(&cfs_rq->throttled_list); in unthrottle_cfs_rq()
5468 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); in unthrottle_cfs_rq()
5470 if (!cfs_rq->load.weight) { in unthrottle_cfs_rq()
5471 if (!cfs_rq->on_list) in unthrottle_cfs_rq()
5484 task_delta = cfs_rq->h_nr_running; in unthrottle_cfs_rq()
5485 idle_task_delta = cfs_rq->idle_h_nr_running; in unthrottle_cfs_rq()
5487 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
5494 idle_task_delta = cfs_rq->h_nr_running; in unthrottle_cfs_rq()
5505 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
5511 idle_task_delta = cfs_rq->h_nr_running; in unthrottle_cfs_rq()
5534 struct cfs_rq *cfs_rq; in distribute_cfs_runtime() local
5538 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, in distribute_cfs_runtime()
5540 struct rq *rq = rq_of(cfs_rq); in distribute_cfs_runtime()
5544 if (!cfs_rq_throttled(cfs_rq)) in distribute_cfs_runtime()
5548 SCHED_WARN_ON(cfs_rq->runtime_remaining > 0); in distribute_cfs_runtime()
5551 runtime = -cfs_rq->runtime_remaining + 1; in distribute_cfs_runtime()
5558 cfs_rq->runtime_remaining += runtime; in distribute_cfs_runtime()
5561 if (cfs_rq->runtime_remaining > 0) in distribute_cfs_runtime()
5562 unthrottle_cfs_rq(cfs_rq); in distribute_cfs_runtime()
5685 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) in __return_cfs_rq_runtime() argument
5687 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in __return_cfs_rq_runtime()
5688 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; in __return_cfs_rq_runtime()
5705 cfs_rq->runtime_remaining -= slack_runtime; in __return_cfs_rq_runtime()
5708 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) in return_cfs_rq_runtime() argument
5713 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) in return_cfs_rq_runtime()
5716 __return_cfs_rq_runtime(cfs_rq); in return_cfs_rq_runtime()
5753 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) in check_enqueue_throttle() argument
5759 if (!cfs_rq->runtime_enabled || cfs_rq->curr) in check_enqueue_throttle()
5763 if (cfs_rq_throttled(cfs_rq)) in check_enqueue_throttle()
5767 account_cfs_rq_runtime(cfs_rq, 0); in check_enqueue_throttle()
5768 if (cfs_rq->runtime_remaining <= 0) in check_enqueue_throttle()
5769 throttle_cfs_rq(cfs_rq); in check_enqueue_throttle()
5774 struct cfs_rq *pcfs_rq, *cfs_rq; in sync_throttle() local
5782 cfs_rq = tg->cfs_rq[cpu]; in sync_throttle()
5783 pcfs_rq = tg->parent->cfs_rq[cpu]; in sync_throttle()
5785 cfs_rq->throttle_count = pcfs_rq->throttle_count; in sync_throttle()
5786 cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu)); in sync_throttle()
5790 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) in check_cfs_rq_runtime() argument
5795 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) in check_cfs_rq_runtime()
5802 if (cfs_rq_throttled(cfs_rq)) in check_cfs_rq_runtime()
5805 return throttle_cfs_rq(cfs_rq); in check_cfs_rq_runtime()
5891 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) in init_cfs_rq_runtime() argument
5893 cfs_rq->runtime_enabled = 0; in init_cfs_rq_runtime()
5894 INIT_LIST_HEAD(&cfs_rq->throttled_list); in init_cfs_rq_runtime()
5936 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in update_runtime_enabled() local
5939 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; in update_runtime_enabled()
5954 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in unthrottle_offline_cfs_rqs() local
5956 if (!cfs_rq->runtime_enabled) in unthrottle_offline_cfs_rqs()
5963 cfs_rq->runtime_remaining = 1; in unthrottle_offline_cfs_rqs()
5968 cfs_rq->runtime_enabled = 0; in unthrottle_offline_cfs_rqs()
5970 if (cfs_rq_throttled(cfs_rq)) in unthrottle_offline_cfs_rqs()
5971 unthrottle_cfs_rq(cfs_rq); in unthrottle_offline_cfs_rqs()
5983 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} in account_cfs_rq_runtime() argument
5984 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } in check_cfs_rq_runtime() argument
5985 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} in check_enqueue_throttle() argument
5987 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} in return_cfs_rq_runtime() argument
5989 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) in cfs_rq_throttled() argument
5994 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) in throttled_hierarchy() argument
6008 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} in init_cfs_rq_runtime() argument
6029 struct cfs_rq *cfs_rq = cfs_rq_of(se); in hrtick_start_fair() local
6034 u64 slice = sched_slice(cfs_rq, se); in hrtick_start_fair()
6111 static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq) in sched_idle_cfs_rq() argument
6113 return cfs_rq->nr_running && in sched_idle_cfs_rq()
6114 cfs_rq->nr_running == cfs_rq->idle_nr_running; in sched_idle_cfs_rq()
6132 struct cfs_rq *cfs_rq; in enqueue_task_fair() local
6159 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
6160 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
6162 cfs_rq->h_nr_running++; in enqueue_task_fair()
6163 cfs_rq->idle_h_nr_running += idle_h_nr_running; in enqueue_task_fair()
6165 if (cfs_rq_is_idle(cfs_rq)) in enqueue_task_fair()
6169 if (cfs_rq_throttled(cfs_rq)) in enqueue_task_fair()
6177 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
6179 update_load_avg(cfs_rq, se, UPDATE_TG); in enqueue_task_fair()
6183 cfs_rq->h_nr_running++; in enqueue_task_fair()
6184 cfs_rq->idle_h_nr_running += idle_h_nr_running; in enqueue_task_fair()
6186 if (cfs_rq_is_idle(cfs_rq)) in enqueue_task_fair()
6190 if (cfs_rq_throttled(cfs_rq)) in enqueue_task_fair()
6229 struct cfs_rq *cfs_rq; in dequeue_task_fair() local
6238 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
6239 dequeue_entity(cfs_rq, se, flags); in dequeue_task_fair()
6241 cfs_rq->h_nr_running--; in dequeue_task_fair()
6242 cfs_rq->idle_h_nr_running -= idle_h_nr_running; in dequeue_task_fair()
6244 if (cfs_rq_is_idle(cfs_rq)) in dequeue_task_fair()
6248 if (cfs_rq_throttled(cfs_rq)) in dequeue_task_fair()
6252 if (cfs_rq->load.weight) { in dequeue_task_fair()
6259 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) in dequeue_task_fair()
6268 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
6270 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_task_fair()
6274 cfs_rq->h_nr_running--; in dequeue_task_fair()
6275 cfs_rq->idle_h_nr_running -= idle_h_nr_running; in dequeue_task_fair()
6277 if (cfs_rq_is_idle(cfs_rq)) in dequeue_task_fair()
6281 if (cfs_rq_throttled(cfs_rq)) in dequeue_task_fair()
6337 struct cfs_rq *cfs_rq; in cpu_load_without() local
6344 cfs_rq = &rq->cfs; in cpu_load_without()
6345 load = READ_ONCE(cfs_rq->avg.load_avg); in cpu_load_without()
6360 struct cfs_rq *cfs_rq; in cpu_runnable_without() local
6367 cfs_rq = &rq->cfs; in cpu_runnable_without()
6368 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); in cpu_runnable_without()
7055 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util_next() local
7056 unsigned long util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util_next()
7072 util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued); in cpu_util_next()
7599 struct cfs_rq *cfs_rq = cfs_rq_of(se); in migrate_task_rq_fair() local
7601 se->vruntime -= u64_u32_load(cfs_rq->min_vruntime); in migrate_task_rq_fair()
7725 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in check_preempt_wakeup() local
7726 int scale = cfs_rq->nr_running >= sched_nr_latency; in check_preempt_wakeup()
7834 struct cfs_rq *cfs_rq; in pick_task_fair() local
7837 cfs_rq = &rq->cfs; in pick_task_fair()
7838 if (!cfs_rq->nr_running) in pick_task_fair()
7842 struct sched_entity *curr = cfs_rq->curr; in pick_task_fair()
7847 update_curr(cfs_rq); in pick_task_fair()
7851 if (unlikely(check_cfs_rq_runtime(cfs_rq))) in pick_task_fair()
7855 se = pick_next_entity(cfs_rq, curr); in pick_task_fair()
7856 cfs_rq = group_cfs_rq(se); in pick_task_fair()
7857 } while (cfs_rq); in pick_task_fair()
7866 struct cfs_rq *cfs_rq = &rq->cfs; in pick_next_task_fair() local
7889 struct sched_entity *curr = cfs_rq->curr; in pick_next_task_fair()
7899 update_curr(cfs_rq); in pick_next_task_fair()
7909 if (unlikely(check_cfs_rq_runtime(cfs_rq))) { in pick_next_task_fair()
7910 cfs_rq = &rq->cfs; in pick_next_task_fair()
7912 if (!cfs_rq->nr_running) in pick_next_task_fair()
7919 se = pick_next_entity(cfs_rq, curr); in pick_next_task_fair()
7920 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
7921 } while (cfs_rq); in pick_next_task_fair()
7933 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
7947 put_prev_entity(cfs_rq, pse); in pick_next_task_fair()
7948 set_next_entity(cfs_rq, se); in pick_next_task_fair()
7962 se = pick_next_entity(cfs_rq, NULL); in pick_next_task_fair()
7963 set_next_entity(cfs_rq, se); in pick_next_task_fair()
7964 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
7965 } while (cfs_rq); in pick_next_task_fair()
8023 struct cfs_rq *cfs_rq; in put_prev_task_fair() local
8026 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
8027 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
8039 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in yield_task_fair() local
8048 clear_buddies(cfs_rq, se); in yield_task_fair()
8055 update_curr(cfs_rq); in yield_task_fair()
8734 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) in cfs_rq_has_blocked() argument
8736 if (cfs_rq->avg.load_avg) in cfs_rq_has_blocked()
8739 if (cfs_rq->avg.util_avg) in cfs_rq_has_blocked()
8775 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; } in cfs_rq_has_blocked() argument
8811 struct cfs_rq *cfs_rq, *pos; in __update_blocked_fair() local
8821 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { in __update_blocked_fair()
8824 if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) { in __update_blocked_fair()
8825 update_tg_load_avg(cfs_rq); in __update_blocked_fair()
8827 if (cfs_rq->nr_running == 0) in __update_blocked_fair()
8828 update_idle_cfs_rq_clock_pelt(cfs_rq); in __update_blocked_fair()
8830 if (cfs_rq == &rq->cfs) in __update_blocked_fair()
8835 se = cfs_rq->tg->se[cpu]; in __update_blocked_fair()
8843 if (cfs_rq_is_decayed(cfs_rq)) in __update_blocked_fair()
8844 list_del_leaf_cfs_rq(cfs_rq); in __update_blocked_fair()
8847 if (cfs_rq_has_blocked(cfs_rq)) in __update_blocked_fair()
8859 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) in update_cfs_rq_h_load() argument
8861 struct rq *rq = rq_of(cfs_rq); in update_cfs_rq_h_load()
8862 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load()
8866 if (cfs_rq->last_h_load_update == now) in update_cfs_rq_h_load()
8869 WRITE_ONCE(cfs_rq->h_load_next, NULL); in update_cfs_rq_h_load()
8871 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
8872 WRITE_ONCE(cfs_rq->h_load_next, se); in update_cfs_rq_h_load()
8873 if (cfs_rq->last_h_load_update == now) in update_cfs_rq_h_load()
8878 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq); in update_cfs_rq_h_load()
8879 cfs_rq->last_h_load_update = now; in update_cfs_rq_h_load()
8882 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { in update_cfs_rq_h_load()
8883 load = cfs_rq->h_load; in update_cfs_rq_h_load()
8885 cfs_rq_load_avg(cfs_rq) + 1); in update_cfs_rq_h_load()
8886 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
8887 cfs_rq->h_load = load; in update_cfs_rq_h_load()
8888 cfs_rq->last_h_load_update = now; in update_cfs_rq_h_load()
8894 struct cfs_rq *cfs_rq = task_cfs_rq(p); in task_h_load() local
8896 update_cfs_rq_h_load(cfs_rq); in task_h_load()
8897 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
8898 cfs_rq_load_avg(cfs_rq) + 1); in task_h_load()
8903 struct cfs_rq *cfs_rq = &rq->cfs; in __update_blocked_fair() local
8906 decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq); in __update_blocked_fair()
8907 if (cfs_rq_has_blocked(cfs_rq)) in __update_blocked_fair()
11906 struct cfs_rq *cfs_rq = cfs_rq_of(se); in se_fi_update() local
11909 if (cfs_rq->forceidle_seq == fi_seq) in se_fi_update()
11911 cfs_rq->forceidle_seq = fi_seq; in se_fi_update()
11914 cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime; in se_fi_update()
11933 struct cfs_rq *cfs_rqa; in cfs_prio_less()
11934 struct cfs_rq *cfs_rqb; in cfs_prio_less()
11944 while (sea->cfs_rq->tg != seb->cfs_rq->tg) { in cfs_prio_less()
11957 cfs_rqa = sea->cfs_rq; in cfs_prio_less()
11958 cfs_rqb = seb->cfs_rq; in cfs_prio_less()
11988 struct cfs_rq *cfs_rq; in task_tick_fair() local
11992 cfs_rq = cfs_rq_of(se); in task_tick_fair()
11993 entity_tick(cfs_rq, se, queued); in task_tick_fair()
12012 struct cfs_rq *cfs_rq; in task_fork_fair() local
12020 cfs_rq = task_cfs_rq(current); in task_fork_fair()
12021 curr = cfs_rq->curr; in task_fork_fair()
12023 update_curr(cfs_rq); in task_fork_fair()
12026 place_entity(cfs_rq, se, 1); in task_fork_fair()
12037 se->vruntime -= cfs_rq->min_vruntime; in task_fork_fair()
12101 struct cfs_rq *cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq() local
12103 if (cfs_rq_throttled(cfs_rq)) in propagate_entity_cfs_rq()
12106 if (!throttled_hierarchy(cfs_rq)) in propagate_entity_cfs_rq()
12107 list_add_leaf_cfs_rq(cfs_rq); in propagate_entity_cfs_rq()
12113 cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
12115 update_load_avg(cfs_rq, se, UPDATE_TG); in propagate_entity_cfs_rq()
12117 if (cfs_rq_throttled(cfs_rq)) in propagate_entity_cfs_rq()
12120 if (!throttled_hierarchy(cfs_rq)) in propagate_entity_cfs_rq()
12121 list_add_leaf_cfs_rq(cfs_rq); in propagate_entity_cfs_rq()
12130 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_entity_cfs_rq() local
12144 update_load_avg(cfs_rq, se, 0); in detach_entity_cfs_rq()
12145 detach_entity_load_avg(cfs_rq, se); in detach_entity_cfs_rq()
12146 update_tg_load_avg(cfs_rq); in detach_entity_cfs_rq()
12152 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_entity_cfs_rq() local
12155 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); in attach_entity_cfs_rq()
12156 attach_entity_load_avg(cfs_rq, se); in attach_entity_cfs_rq()
12157 update_tg_load_avg(cfs_rq); in attach_entity_cfs_rq()
12164 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_task_cfs_rq() local
12171 place_entity(cfs_rq, se, 0); in detach_task_cfs_rq()
12172 se->vruntime -= cfs_rq->min_vruntime; in detach_task_cfs_rq()
12181 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_task_cfs_rq() local
12186 se->vruntime += cfs_rq->min_vruntime; in attach_task_cfs_rq()
12231 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_next_task_fair() local
12233 set_next_entity(cfs_rq, se); in set_next_task_fair()
12235 account_cfs_rq_runtime(cfs_rq, 0); in set_next_task_fair()
12239 void init_cfs_rq(struct cfs_rq *cfs_rq) in init_cfs_rq() argument
12241 cfs_rq->tasks_timeline = RB_ROOT_CACHED; in init_cfs_rq()
12242 u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20))); in init_cfs_rq()
12244 raw_spin_lock_init(&cfs_rq->removed.lock); in init_cfs_rq()
12273 if (tg->cfs_rq) in free_fair_sched_group()
12274 kfree(tg->cfs_rq[i]); in free_fair_sched_group()
12279 kfree(tg->cfs_rq); in free_fair_sched_group()
12286 struct cfs_rq *cfs_rq; in alloc_fair_sched_group() local
12289 tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL); in alloc_fair_sched_group()
12290 if (!tg->cfs_rq) in alloc_fair_sched_group()
12301 cfs_rq = kzalloc_node(sizeof(struct cfs_rq), in alloc_fair_sched_group()
12303 if (!cfs_rq) in alloc_fair_sched_group()
12311 init_cfs_rq(cfs_rq); in alloc_fair_sched_group()
12312 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
12319 kfree(cfs_rq); in alloc_fair_sched_group()
12358 if (!tg->cfs_rq[cpu]->on_list) in unregister_fair_sched_group()
12364 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); in unregister_fair_sched_group()
12369 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, in init_tg_cfs_entry() argument
12375 cfs_rq->tg = tg; in init_tg_cfs_entry()
12376 cfs_rq->rq = rq; in init_tg_cfs_entry()
12377 init_cfs_rq_runtime(cfs_rq); in init_tg_cfs_entry()
12379 tg->cfs_rq[cpu] = cfs_rq; in init_tg_cfs_entry()
12387 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
12390 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
12394 se->my_q = cfs_rq; in init_tg_cfs_entry()
12474 struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i]; in sched_group_set_idle()
12499 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sched_group_set_idle() local
12504 cfs_rq->idle_h_nr_running += idle_task_delta; in sched_group_set_idle()
12507 if (cfs_rq_is_idle(cfs_rq)) in sched_group_set_idle()
12608 struct cfs_rq *cfs_rq, *pos; in print_cfs_stats() local
12611 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) in print_cfs_stats()
12612 print_cfs_rq(m, cpu, cfs_rq); in print_cfs_stats()