Lines Matching refs:cfs_rq

Cross-reference listing of lines that reference cfs_rq in the CFS scheduler implementation (kernel/sched/fair.c). Each entry gives the source line number, the matching code, and the enclosing function; entries tagged "argument" or "local" mark lines where cfs_rq is declared as a function parameter or as a local variable, respectively.
261 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) in task_cfs_rq()
263 return p->se.cfs_rq; in task_cfs_rq()
267 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of()
269 return se->cfs_rq; in cfs_rq_of()
273 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) in group_cfs_rq()
278 static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) in cfs_rq_tg_path() argument
283 if (cfs_rq && task_group_is_autogroup(cfs_rq->tg)) in cfs_rq_tg_path()
284 autogroup_path(cfs_rq->tg, path, len); in cfs_rq_tg_path()
285 else if (cfs_rq && cfs_rq->tg->css.cgroup) in cfs_rq_tg_path()
286 cgroup_path(cfs_rq->tg->css.cgroup, path, len); in cfs_rq_tg_path()
291 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq() argument
293 struct rq *rq = rq_of(cfs_rq); in list_add_leaf_cfs_rq()
296 if (cfs_rq->on_list) in list_add_leaf_cfs_rq()
299 cfs_rq->on_list = 1; in list_add_leaf_cfs_rq()
310 if (cfs_rq->tg->parent && in list_add_leaf_cfs_rq()
311 cfs_rq->tg->parent->cfs_rq[cpu]->on_list) { in list_add_leaf_cfs_rq()
318 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, in list_add_leaf_cfs_rq()
319 &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list)); in list_add_leaf_cfs_rq()
329 if (!cfs_rq->tg->parent) { in list_add_leaf_cfs_rq()
334 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, in list_add_leaf_cfs_rq()
350 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch); in list_add_leaf_cfs_rq()
355 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
359 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_del_leaf_cfs_rq() argument
361 if (cfs_rq->on_list) { in list_del_leaf_cfs_rq()
362 struct rq *rq = rq_of(cfs_rq); in list_del_leaf_cfs_rq()
371 if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list) in list_del_leaf_cfs_rq()
372 rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev; in list_del_leaf_cfs_rq()
374 list_del_rcu(&cfs_rq->leaf_cfs_rq_list); in list_del_leaf_cfs_rq()
375 cfs_rq->on_list = 0; in list_del_leaf_cfs_rq()
385 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ argument
386 list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
390 static inline struct cfs_rq *
393 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
394 return se->cfs_rq; in is_same_group()
446 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) in task_cfs_rq()
451 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of()
460 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) in group_cfs_rq()
465 static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) in cfs_rq_tg_path() argument
471 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq() argument
476 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_del_leaf_cfs_rq() argument
484 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ argument
485 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
500 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
530 static void update_min_vruntime(struct cfs_rq *cfs_rq) in update_min_vruntime() argument
532 struct sched_entity *curr = cfs_rq->curr; in update_min_vruntime()
533 struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline); in update_min_vruntime()
535 u64 vruntime = cfs_rq->min_vruntime; in update_min_vruntime()
555 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime); in update_min_vruntime()
558 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; in update_min_vruntime()
565 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
567 struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node; in __enqueue_entity()
592 &cfs_rq->tasks_timeline, leftmost); in __enqueue_entity()
595 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
597 rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline); in __dequeue_entity()
600 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) in __pick_first_entity() argument
602 struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline); in __pick_first_entity()
621 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) in __pick_last_entity() argument
623 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root); in __pick_last_entity()
692 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_slice() argument
694 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); in sched_slice()
700 cfs_rq = cfs_rq_of(se); in sched_slice()
701 load = &cfs_rq->load; in sched_slice()
704 lw = cfs_rq->load; in sched_slice()
719 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_vslice() argument
721 return calc_delta_fair(sched_slice(cfs_rq, se), se); in sched_vslice()
783 struct cfs_rq *cfs_rq = cfs_rq_of(se); in post_init_entity_util_avg() local
785 long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq))); in post_init_entity_util_avg()
786 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; in post_init_entity_util_avg()
789 if (cfs_rq->avg.util_avg != 0) { in post_init_entity_util_avg()
790 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; in post_init_entity_util_avg()
791 sa->util_avg /= (cfs_rq->avg.load_avg + 1); in post_init_entity_util_avg()
811 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
825 static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) in update_tg_load_avg() argument
833 static void update_curr(struct cfs_rq *cfs_rq) in update_curr() argument
835 struct sched_entity *curr = cfs_rq->curr; in update_curr()
836 u64 now = rq_clock_task(rq_of(cfs_rq)); in update_curr()
852 schedstat_add(cfs_rq->exec_clock, delta_exec); in update_curr()
855 update_min_vruntime(cfs_rq); in update_curr()
865 account_cfs_rq_runtime(cfs_rq, delta_exec); in update_curr()
874 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start() argument
881 wait_start = rq_clock(rq_of(cfs_rq)); in update_stats_wait_start()
892 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end() argument
900 delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start); in update_stats_wait_end()
924 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue_sleeper() argument
939 u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start; in update_stats_enqueue_sleeper()
956 u64 delta = rq_clock(rq_of(cfs_rq)) - block_start; in update_stats_enqueue_sleeper()
995 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_enqueue() argument
1004 if (se != cfs_rq->curr) in update_stats_enqueue()
1005 update_stats_wait_start(cfs_rq, se); in update_stats_enqueue()
1008 update_stats_enqueue_sleeper(cfs_rq, se); in update_stats_enqueue()
1012 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_dequeue() argument
1022 if (se != cfs_rq->curr) in update_stats_dequeue()
1023 update_stats_wait_end(cfs_rq, se); in update_stats_dequeue()
1030 rq_clock(rq_of(cfs_rq))); in update_stats_dequeue()
1033 rq_clock(rq_of(cfs_rq))); in update_stats_dequeue()
1041 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
1046 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
2757 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
2759 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
2762 struct rq *rq = rq_of(cfs_rq); in account_entity_enqueue()
2768 cfs_rq->nr_running++; in account_entity_enqueue()
2772 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
2774 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
2777 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
2781 cfs_rq->nr_running--; in account_entity_dequeue()
2834 enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_runnable_load_avg() argument
2836 cfs_rq->runnable_weight += se->runnable_weight; in enqueue_runnable_load_avg()
2838 cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg; in enqueue_runnable_load_avg()
2839 cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum; in enqueue_runnable_load_avg()
2843 dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_runnable_load_avg() argument
2845 cfs_rq->runnable_weight -= se->runnable_weight; in dequeue_runnable_load_avg()
2847 sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg); in dequeue_runnable_load_avg()
2848 sub_positive(&cfs_rq->avg.runnable_load_sum, in dequeue_runnable_load_avg()
2853 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_load_avg() argument
2855 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
2856 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
2860 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_load_avg() argument
2862 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
2863 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
2867 enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_runnable_load_avg() argument
2869 dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_runnable_load_avg() argument
2871 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_load_avg() argument
2873 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_load_avg() argument
2876 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
2881 if (cfs_rq->curr == se) in reweight_entity()
2882 update_curr(cfs_rq); in reweight_entity()
2883 account_entity_dequeue(cfs_rq, se); in reweight_entity()
2884 dequeue_runnable_load_avg(cfs_rq, se); in reweight_entity()
2886 dequeue_load_avg(cfs_rq, se); in reweight_entity()
2901 enqueue_load_avg(cfs_rq, se); in reweight_entity()
2903 account_entity_enqueue(cfs_rq, se); in reweight_entity()
2904 enqueue_runnable_load_avg(cfs_rq, se); in reweight_entity()
2911 struct cfs_rq *cfs_rq = cfs_rq_of(se); in reweight_task() local
2915 reweight_entity(cfs_rq, se, weight, weight); in reweight_task()
2994 static long calc_group_shares(struct cfs_rq *cfs_rq) in calc_group_shares() argument
2997 struct task_group *tg = cfs_rq->tg; in calc_group_shares()
3001 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); in calc_group_shares()
3006 tg_weight -= cfs_rq->tg_load_avg_contrib; in calc_group_shares()
3055 static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares) in calc_group_runnable() argument
3059 load_avg = max(cfs_rq->avg.load_avg, in calc_group_runnable()
3060 scale_load_down(cfs_rq->load.weight)); in calc_group_runnable()
3062 runnable = max(cfs_rq->avg.runnable_load_avg, in calc_group_runnable()
3063 scale_load_down(cfs_rq->runnable_weight)); in calc_group_runnable()
3073 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
3081 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in update_cfs_group()
3109 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) in cfs_rq_util_change() argument
3111 struct rq *rq = rq_of(cfs_rq); in cfs_rq_util_change()
3113 if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) { in cfs_rq_util_change()
3149 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) in update_tg_load_avg() argument
3151 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; in update_tg_load_avg()
3156 if (cfs_rq->tg == &root_task_group) in update_tg_load_avg()
3159 if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { in update_tg_load_avg()
3160 atomic_long_add(delta, &cfs_rq->tg->load_avg); in update_tg_load_avg()
3161 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; in update_tg_load_avg()
3171 struct cfs_rq *prev, struct cfs_rq *next) in set_task_rq_fair()
3284 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_util() argument
3305 add_positive(&cfs_rq->avg.util_avg, delta); in update_tg_cfs_util()
3306 cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX; in update_tg_cfs_util()
3310 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_runnable() argument
3360 add_positive(&cfs_rq->avg.load_avg, delta_avg); in update_tg_cfs_runnable()
3361 add_positive(&cfs_rq->avg.load_sum, delta_sum); in update_tg_cfs_runnable()
3372 add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg); in update_tg_cfs_runnable()
3373 add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum); in update_tg_cfs_runnable()
3377 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) in add_tg_cfs_propagate() argument
3379 cfs_rq->propagate = 1; in add_tg_cfs_propagate()
3380 cfs_rq->prop_runnable_sum += runnable_sum; in add_tg_cfs_propagate()
3386 struct cfs_rq *cfs_rq, *gcfs_rq; in propagate_entity_load_avg() local
3397 cfs_rq = cfs_rq_of(se); in propagate_entity_load_avg()
3399 add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum); in propagate_entity_load_avg()
3401 update_tg_cfs_util(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3402 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3404 trace_pelt_cfs_tp(cfs_rq); in propagate_entity_load_avg()
3416 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in skip_blocked_update()
3442 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {} in update_tg_load_avg() argument
3449 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {} in add_tg_cfs_propagate() argument
3470 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) in update_cfs_rq_load_avg() argument
3473 struct sched_avg *sa = &cfs_rq->avg; in update_cfs_rq_load_avg()
3476 if (cfs_rq->removed.nr) { in update_cfs_rq_load_avg()
3480 raw_spin_lock(&cfs_rq->removed.lock); in update_cfs_rq_load_avg()
3481 swap(cfs_rq->removed.util_avg, removed_util); in update_cfs_rq_load_avg()
3482 swap(cfs_rq->removed.load_avg, removed_load); in update_cfs_rq_load_avg()
3483 swap(cfs_rq->removed.runnable_sum, removed_runnable_sum); in update_cfs_rq_load_avg()
3484 cfs_rq->removed.nr = 0; in update_cfs_rq_load_avg()
3485 raw_spin_unlock(&cfs_rq->removed.lock); in update_cfs_rq_load_avg()
3495 add_tg_cfs_propagate(cfs_rq, -(long)removed_runnable_sum); in update_cfs_rq_load_avg()
3500 decayed |= __update_load_avg_cfs_rq(now, cfs_rq); in update_cfs_rq_load_avg()
3504 cfs_rq->load_last_update_time_copy = sa->last_update_time; in update_cfs_rq_load_avg()
3519 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in attach_entity_load_avg() argument
3521 u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib; in attach_entity_load_avg()
3530 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
3531 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
3549 enqueue_load_avg(cfs_rq, se); in attach_entity_load_avg()
3550 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
3551 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
3553 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
3555 cfs_rq_util_change(cfs_rq, flags); in attach_entity_load_avg()
3557 trace_pelt_cfs_tp(cfs_rq); in attach_entity_load_avg()
3568 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg() argument
3570 dequeue_load_avg(cfs_rq, se); in detach_entity_load_avg()
3571 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
3572 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
3574 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
3576 cfs_rq_util_change(cfs_rq, 0); in detach_entity_load_avg()
3578 trace_pelt_cfs_tp(cfs_rq); in detach_entity_load_avg()
3589 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_load_avg() argument
3591 u64 now = cfs_rq_clock_pelt(cfs_rq); in update_load_avg()
3599 __update_load_avg_se(now, cfs_rq, se); in update_load_avg()
3601 decayed = update_cfs_rq_load_avg(now, cfs_rq); in update_load_avg()
3613 attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION); in update_load_avg()
3614 update_tg_load_avg(cfs_rq, 0); in update_load_avg()
3617 cfs_rq_util_change(cfs_rq, 0); in update_load_avg()
3620 update_tg_load_avg(cfs_rq, 0); in update_load_avg()
3625 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) in cfs_rq_last_update_time() argument
3631 last_update_time_copy = cfs_rq->load_last_update_time_copy; in cfs_rq_last_update_time()
3633 last_update_time = cfs_rq->avg.last_update_time; in cfs_rq_last_update_time()
3639 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) in cfs_rq_last_update_time() argument
3641 return cfs_rq->avg.last_update_time; in cfs_rq_last_update_time()
3651 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sync_entity_load_avg() local
3654 last_update_time = cfs_rq_last_update_time(cfs_rq); in sync_entity_load_avg()
3664 struct cfs_rq *cfs_rq = cfs_rq_of(se); in remove_entity_load_avg() local
3675 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags); in remove_entity_load_avg()
3676 ++cfs_rq->removed.nr; in remove_entity_load_avg()
3677 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
3678 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
3679 cfs_rq->removed.runnable_sum += se->avg.load_sum; /* == runnable_sum */ in remove_entity_load_avg()
3680 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags); in remove_entity_load_avg()
3683 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq) in cfs_rq_runnable_load_avg() argument
3685 return cfs_rq->avg.runnable_load_avg; in cfs_rq_runnable_load_avg()
3688 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) in cfs_rq_load_avg() argument
3690 return cfs_rq->avg.load_avg; in cfs_rq_load_avg()
3724 static inline void util_est_enqueue(struct cfs_rq *cfs_rq, in util_est_enqueue() argument
3733 enqueued = cfs_rq->avg.util_est.enqueued; in util_est_enqueue()
3735 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); in util_est_enqueue()
3752 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) in util_est_dequeue() argument
3762 ue.enqueued = cfs_rq->avg.util_est.enqueued; in util_est_dequeue()
3764 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued); in util_est_dequeue()
3794 cpu = cpu_of(rq_of(cfs_rq)); in util_est_dequeue()
3850 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) in update_load_avg() argument
3852 cfs_rq_util_change(cfs_rq, 0); in update_load_avg()
3858 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {} in attach_entity_load_avg() argument
3860 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in detach_entity_load_avg() argument
3868 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} in util_est_enqueue() argument
3871 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, in util_est_dequeue() argument
3877 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) in check_spread() argument
3880 s64 d = se->vruntime - cfs_rq->min_vruntime; in check_spread()
3886 schedstat_inc(cfs_rq->nr_spread_over); in check_spread()
3891 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) in place_entity() argument
3893 u64 vruntime = cfs_rq->min_vruntime; in place_entity()
3902 vruntime += sched_vslice(cfs_rq, se); in place_entity()
3922 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
3976 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
3979 bool curr = cfs_rq->curr == se; in enqueue_entity()
3986 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
3988 update_curr(cfs_rq); in enqueue_entity()
3997 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
4007 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); in enqueue_entity()
4009 enqueue_runnable_load_avg(cfs_rq, se); in enqueue_entity()
4010 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
4013 place_entity(cfs_rq, se, 0); in enqueue_entity()
4016 update_stats_enqueue(cfs_rq, se, flags); in enqueue_entity()
4017 check_spread(cfs_rq, se); in enqueue_entity()
4019 __enqueue_entity(cfs_rq, se); in enqueue_entity()
4022 if (cfs_rq->nr_running == 1) { in enqueue_entity()
4023 list_add_leaf_cfs_rq(cfs_rq); in enqueue_entity()
4024 check_enqueue_throttle(cfs_rq); in enqueue_entity()
4031 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_last() local
4032 if (cfs_rq->last != se) in __clear_buddies_last()
4035 cfs_rq->last = NULL; in __clear_buddies_last()
4042 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next() local
4043 if (cfs_rq->next != se) in __clear_buddies_next()
4046 cfs_rq->next = NULL; in __clear_buddies_next()
4053 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_skip() local
4054 if (cfs_rq->skip != se) in __clear_buddies_skip()
4057 cfs_rq->skip = NULL; in __clear_buddies_skip()
4061 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
4063 if (cfs_rq->last == se) in clear_buddies()
4066 if (cfs_rq->next == se) in clear_buddies()
4069 if (cfs_rq->skip == se) in clear_buddies()
4073 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
4076 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
4081 update_curr(cfs_rq); in dequeue_entity()
4091 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_entity()
4092 dequeue_runnable_load_avg(cfs_rq, se); in dequeue_entity()
4094 update_stats_dequeue(cfs_rq, se, flags); in dequeue_entity()
4096 clear_buddies(cfs_rq, se); in dequeue_entity()
4098 if (se != cfs_rq->curr) in dequeue_entity()
4099 __dequeue_entity(cfs_rq, se); in dequeue_entity()
4101 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
4110 se->vruntime -= cfs_rq->min_vruntime; in dequeue_entity()
4113 return_cfs_rq_runtime(cfs_rq); in dequeue_entity()
4124 update_min_vruntime(cfs_rq); in dequeue_entity()
4131 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) in check_preempt_tick() argument
4137 ideal_runtime = sched_slice(cfs_rq, curr); in check_preempt_tick()
4140 resched_curr(rq_of(cfs_rq)); in check_preempt_tick()
4145 clear_buddies(cfs_rq, curr); in check_preempt_tick()
4157 se = __pick_first_entity(cfs_rq); in check_preempt_tick()
4164 resched_curr(rq_of(cfs_rq)); in check_preempt_tick()
4168 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
4177 update_stats_wait_end(cfs_rq, se); in set_next_entity()
4178 __dequeue_entity(cfs_rq, se); in set_next_entity()
4179 update_load_avg(cfs_rq, se, UPDATE_TG); in set_next_entity()
4182 update_stats_curr_start(cfs_rq, se); in set_next_entity()
4183 cfs_rq->curr = se; in set_next_entity()
4191 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { in set_next_entity()
4211 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) in pick_next_entity() argument
4213 struct sched_entity *left = __pick_first_entity(cfs_rq); in pick_next_entity()
4229 if (cfs_rq->skip == se) { in pick_next_entity()
4233 second = __pick_first_entity(cfs_rq); in pick_next_entity()
4247 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) in pick_next_entity()
4248 se = cfs_rq->last; in pick_next_entity()
4253 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) in pick_next_entity()
4254 se = cfs_rq->next; in pick_next_entity()
4256 clear_buddies(cfs_rq, se); in pick_next_entity()
4261 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
4263 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) in put_prev_entity() argument
4270 update_curr(cfs_rq); in put_prev_entity()
4273 check_cfs_rq_runtime(cfs_rq); in put_prev_entity()
4275 check_spread(cfs_rq, prev); in put_prev_entity()
4278 update_stats_wait_start(cfs_rq, prev); in put_prev_entity()
4280 __enqueue_entity(cfs_rq, prev); in put_prev_entity()
4282 update_load_avg(cfs_rq, prev, 0); in put_prev_entity()
4284 cfs_rq->curr = NULL; in put_prev_entity()
4288 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) in entity_tick() argument
4293 update_curr(cfs_rq); in entity_tick()
4298 update_load_avg(cfs_rq, curr, UPDATE_TG); in entity_tick()
4307 resched_curr(rq_of(cfs_rq)); in entity_tick()
4314 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) in entity_tick()
4318 if (cfs_rq->nr_running > 1) in entity_tick()
4319 check_preempt_tick(cfs_rq, curr); in entity_tick()
4389 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) in assign_cfs_rq_runtime() argument
4391 struct task_group *tg = cfs_rq->tg; in assign_cfs_rq_runtime()
4396 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; in assign_cfs_rq_runtime()
4412 cfs_rq->runtime_remaining += amount; in assign_cfs_rq_runtime()
4414 return cfs_rq->runtime_remaining > 0; in assign_cfs_rq_runtime()
4417 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) in __account_cfs_rq_runtime() argument
4420 cfs_rq->runtime_remaining -= delta_exec; in __account_cfs_rq_runtime()
4422 if (likely(cfs_rq->runtime_remaining > 0)) in __account_cfs_rq_runtime()
4425 if (cfs_rq->throttled) in __account_cfs_rq_runtime()
4431 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) in __account_cfs_rq_runtime()
4432 resched_curr(rq_of(cfs_rq)); in __account_cfs_rq_runtime()
4436 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) in account_cfs_rq_runtime() argument
4438 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) in account_cfs_rq_runtime()
4441 __account_cfs_rq_runtime(cfs_rq, delta_exec); in account_cfs_rq_runtime()
4444 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) in cfs_rq_throttled() argument
4446 return cfs_bandwidth_used() && cfs_rq->throttled; in cfs_rq_throttled()
4450 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) in throttled_hierarchy() argument
4452 return cfs_bandwidth_used() && cfs_rq->throttle_count; in throttled_hierarchy()
4463 struct cfs_rq *src_cfs_rq, *dest_cfs_rq; in throttled_lb_pair()
4465 src_cfs_rq = tg->cfs_rq[src_cpu]; in throttled_lb_pair()
4466 dest_cfs_rq = tg->cfs_rq[dest_cpu]; in throttled_lb_pair()
4475 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up() local
4477 cfs_rq->throttle_count--; in tg_unthrottle_up()
4478 if (!cfs_rq->throttle_count) { in tg_unthrottle_up()
4479 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - in tg_unthrottle_up()
4480 cfs_rq->throttled_clock_task; in tg_unthrottle_up()
4483 if (cfs_rq->nr_running >= 1) in tg_unthrottle_up()
4484 list_add_leaf_cfs_rq(cfs_rq); in tg_unthrottle_up()
4493 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down() local
4496 if (!cfs_rq->throttle_count) { in tg_throttle_down()
4497 cfs_rq->throttled_clock_task = rq_clock_task(rq); in tg_throttle_down()
4498 list_del_leaf_cfs_rq(cfs_rq); in tg_throttle_down()
4500 cfs_rq->throttle_count++; in tg_throttle_down()
4505 static void throttle_cfs_rq(struct cfs_rq *cfs_rq) in throttle_cfs_rq() argument
4507 struct rq *rq = rq_of(cfs_rq); in throttle_cfs_rq()
4508 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in throttle_cfs_rq()
4513 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
4517 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); in throttle_cfs_rq()
4520 task_delta = cfs_rq->h_nr_running; in throttle_cfs_rq()
4521 idle_task_delta = cfs_rq->idle_h_nr_running; in throttle_cfs_rq()
4523 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
4540 cfs_rq->throttled = 1; in throttle_cfs_rq()
4541 cfs_rq->throttled_clock = rq_clock(rq); in throttle_cfs_rq()
4551 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); in throttle_cfs_rq()
4553 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); in throttle_cfs_rq()
4565 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) in unthrottle_cfs_rq() argument
4567 struct rq *rq = rq_of(cfs_rq); in unthrottle_cfs_rq()
4568 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in unthrottle_cfs_rq()
4573 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
4575 cfs_rq->throttled = 0; in unthrottle_cfs_rq()
4580 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; in unthrottle_cfs_rq()
4581 list_del_rcu(&cfs_rq->throttled_list); in unthrottle_cfs_rq()
4585 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); in unthrottle_cfs_rq()
4587 if (!cfs_rq->load.weight) in unthrottle_cfs_rq()
4590 task_delta = cfs_rq->h_nr_running; in unthrottle_cfs_rq()
4591 idle_task_delta = cfs_rq->idle_h_nr_running; in unthrottle_cfs_rq()
4596 cfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
4598 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); in unthrottle_cfs_rq()
4599 cfs_rq->h_nr_running += task_delta; in unthrottle_cfs_rq()
4600 cfs_rq->idle_h_nr_running += idle_task_delta; in unthrottle_cfs_rq()
4602 if (cfs_rq_throttled(cfs_rq)) in unthrottle_cfs_rq()
4618 struct cfs_rq *cfs_rq; in distribute_cfs_runtime() local
4623 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, in distribute_cfs_runtime()
4625 struct rq *rq = rq_of(cfs_rq); in distribute_cfs_runtime()
4629 if (!cfs_rq_throttled(cfs_rq)) in distribute_cfs_runtime()
4633 SCHED_WARN_ON(cfs_rq->runtime_remaining > 0); in distribute_cfs_runtime()
4635 runtime = -cfs_rq->runtime_remaining + 1; in distribute_cfs_runtime()
4640 cfs_rq->runtime_remaining += runtime; in distribute_cfs_runtime()
4643 if (cfs_rq->runtime_remaining > 0) in distribute_cfs_runtime()
4644 unthrottle_cfs_rq(cfs_rq); in distribute_cfs_runtime()
4778 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) in __return_cfs_rq_runtime() argument
4780 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in __return_cfs_rq_runtime()
4781 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; in __return_cfs_rq_runtime()
4798 cfs_rq->runtime_remaining -= slack_runtime; in __return_cfs_rq_runtime()
4801 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) in return_cfs_rq_runtime() argument
4806 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) in return_cfs_rq_runtime()
4809 __return_cfs_rq_runtime(cfs_rq); in return_cfs_rq_runtime()
4858 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) in check_enqueue_throttle() argument
4864 if (!cfs_rq->runtime_enabled || cfs_rq->curr) in check_enqueue_throttle()
4868 if (cfs_rq_throttled(cfs_rq)) in check_enqueue_throttle()
4872 account_cfs_rq_runtime(cfs_rq, 0); in check_enqueue_throttle()
4873 if (cfs_rq->runtime_remaining <= 0) in check_enqueue_throttle()
4874 throttle_cfs_rq(cfs_rq); in check_enqueue_throttle()
4879 struct cfs_rq *pcfs_rq, *cfs_rq; in sync_throttle() local
4887 cfs_rq = tg->cfs_rq[cpu]; in sync_throttle()
4888 pcfs_rq = tg->parent->cfs_rq[cpu]; in sync_throttle()
4890 cfs_rq->throttle_count = pcfs_rq->throttle_count; in sync_throttle()
4891 cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); in sync_throttle()
4895 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) in check_cfs_rq_runtime() argument
4900 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) in check_cfs_rq_runtime()
4907 if (cfs_rq_throttled(cfs_rq)) in check_cfs_rq_runtime()
4910 throttle_cfs_rq(cfs_rq); in check_cfs_rq_runtime()
4996 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) in init_cfs_rq_runtime() argument
4998 cfs_rq->runtime_enabled = 0; in init_cfs_rq_runtime()
4999 INIT_LIST_HEAD(&cfs_rq->throttled_list); in init_cfs_rq_runtime()
5041 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in update_runtime_enabled() local
5044 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; in update_runtime_enabled()
5059 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in unthrottle_offline_cfs_rqs() local
5061 if (!cfs_rq->runtime_enabled) in unthrottle_offline_cfs_rqs()
5068 cfs_rq->runtime_remaining = 1; in unthrottle_offline_cfs_rqs()
5073 cfs_rq->runtime_enabled = 0; in unthrottle_offline_cfs_rqs()
5075 if (cfs_rq_throttled(cfs_rq)) in unthrottle_offline_cfs_rqs()
5076 unthrottle_cfs_rq(cfs_rq); in unthrottle_offline_cfs_rqs()
5088 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} in account_cfs_rq_runtime() argument
5089 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } in check_cfs_rq_runtime() argument
5090 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} in check_enqueue_throttle() argument
5092 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} in return_cfs_rq_runtime() argument
5094 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) in cfs_rq_throttled() argument
5099 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) in throttled_hierarchy() argument
5113 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} in init_cfs_rq_runtime() argument
5134 struct cfs_rq *cfs_rq = cfs_rq_of(se); in hrtick_start_fair() local
5139 u64 slice = sched_slice(cfs_rq, se); in hrtick_start_fair()
5205 struct cfs_rq *cfs_rq; in enqueue_task_fair() local
5228 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5229 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
5237 if (cfs_rq_throttled(cfs_rq)) in enqueue_task_fair()
5239 cfs_rq->h_nr_running++; in enqueue_task_fair()
5240 cfs_rq->idle_h_nr_running += idle_h_nr_running; in enqueue_task_fair()
5246 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5247 cfs_rq->h_nr_running++; in enqueue_task_fair()
5248 cfs_rq->idle_h_nr_running += idle_h_nr_running; in enqueue_task_fair()
5250 if (cfs_rq_throttled(cfs_rq)) in enqueue_task_fair()
5253 update_load_avg(cfs_rq, se, UPDATE_TG); in enqueue_task_fair()
5286 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5288 if (list_add_leaf_cfs_rq(cfs_rq)) in enqueue_task_fair()
5307 struct cfs_rq *cfs_rq; in dequeue_task_fair() local
5313 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
5314 dequeue_entity(cfs_rq, se, flags); in dequeue_task_fair()
5322 if (cfs_rq_throttled(cfs_rq)) in dequeue_task_fair()
5324 cfs_rq->h_nr_running--; in dequeue_task_fair()
5325 cfs_rq->idle_h_nr_running -= idle_h_nr_running; in dequeue_task_fair()
5328 if (cfs_rq->load.weight) { in dequeue_task_fair()
5335 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) in dequeue_task_fair()
5343 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
5344 cfs_rq->h_nr_running--; in dequeue_task_fair()
5345 cfs_rq->idle_h_nr_running -= idle_h_nr_running; in dequeue_task_fair()
5347 if (cfs_rq_throttled(cfs_rq)) in dequeue_task_fair()
5350 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_task_fair()
6092 struct cfs_rq *cfs_rq; in cpu_util() local
6095 cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util()
6096 util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util()
6099 util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued)); in cpu_util()
6119 struct cfs_rq *cfs_rq; in cpu_util_without() local
6126 cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util_without()
6127 util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util_without()
6160 READ_ONCE(cfs_rq->avg.util_est.enqueued); in cpu_util_without()
6226 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util_next() local
6227 unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util_next()
6241 util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued); in cpu_util_next()
6588 struct cfs_rq *cfs_rq = cfs_rq_of(se); in migrate_task_rq_fair() local
6595 min_vruntime_copy = cfs_rq->min_vruntime_copy; in migrate_task_rq_fair()
6597 min_vruntime = cfs_rq->min_vruntime; in migrate_task_rq_fair()
6600 min_vruntime = cfs_rq->min_vruntime; in migrate_task_rq_fair()
6736 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in check_preempt_wakeup() local
6737 int scale = cfs_rq->nr_running >= sched_nr_latency; in check_preempt_wakeup()
6818 struct cfs_rq *cfs_rq = &rq->cfs; in pick_next_task_fair() local
6840 struct sched_entity *curr = cfs_rq->curr; in pick_next_task_fair()
6850 update_curr(cfs_rq); in pick_next_task_fair()
6860 if (unlikely(check_cfs_rq_runtime(cfs_rq))) { in pick_next_task_fair()
6861 cfs_rq = &rq->cfs; in pick_next_task_fair()
6863 if (!cfs_rq->nr_running) in pick_next_task_fair()
6870 se = pick_next_entity(cfs_rq, curr); in pick_next_task_fair()
6871 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
6872 } while (cfs_rq); in pick_next_task_fair()
6884 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
6898 put_prev_entity(cfs_rq, pse); in pick_next_task_fair()
6899 set_next_entity(cfs_rq, se); in pick_next_task_fair()
6909 se = pick_next_entity(cfs_rq, NULL); in pick_next_task_fair()
6910 set_next_entity(cfs_rq, se); in pick_next_task_fair()
6911 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
6912 } while (cfs_rq); in pick_next_task_fair()
6965 struct cfs_rq *cfs_rq; in put_prev_task_fair() local
6968 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
6969 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
6981 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in yield_task_fair() local
6990 clear_buddies(cfs_rq, se); in yield_task_fair()
6997 update_curr(cfs_rq); in yield_task_fair()
7547 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) in cfs_rq_has_blocked() argument
7549 if (cfs_rq->avg.load_avg) in cfs_rq_has_blocked()
7552 if (cfs_rq->avg.util_avg) in cfs_rq_has_blocked()
7582 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; } in cfs_rq_has_blocked() argument
7611 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) in cfs_rq_is_decayed() argument
7613 if (cfs_rq->load.weight) in cfs_rq_is_decayed()
7616 if (cfs_rq->avg.load_sum) in cfs_rq_is_decayed()
7619 if (cfs_rq->avg.util_sum) in cfs_rq_is_decayed()
7622 if (cfs_rq->avg.runnable_load_sum) in cfs_rq_is_decayed()
7630 struct cfs_rq *cfs_rq, *pos; in __update_blocked_fair() local
7638 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { in __update_blocked_fair()
7641 if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) { in __update_blocked_fair()
7642 update_tg_load_avg(cfs_rq, 0); in __update_blocked_fair()
7644 if (cfs_rq == &rq->cfs) in __update_blocked_fair()
7649 se = cfs_rq->tg->se[cpu]; in __update_blocked_fair()
7657 if (cfs_rq_is_decayed(cfs_rq)) in __update_blocked_fair()
7658 list_del_leaf_cfs_rq(cfs_rq); in __update_blocked_fair()
7661 if (cfs_rq_has_blocked(cfs_rq)) in __update_blocked_fair()
7673 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) in update_cfs_rq_h_load() argument
7675 struct rq *rq = rq_of(cfs_rq); in update_cfs_rq_h_load()
7676 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load()
7680 if (cfs_rq->last_h_load_update == now) in update_cfs_rq_h_load()
7683 WRITE_ONCE(cfs_rq->h_load_next, NULL); in update_cfs_rq_h_load()
7685 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
7686 WRITE_ONCE(cfs_rq->h_load_next, se); in update_cfs_rq_h_load()
7687 if (cfs_rq->last_h_load_update == now) in update_cfs_rq_h_load()
7692 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq); in update_cfs_rq_h_load()
7693 cfs_rq->last_h_load_update = now; in update_cfs_rq_h_load()
7696 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { in update_cfs_rq_h_load()
7697 load = cfs_rq->h_load; in update_cfs_rq_h_load()
7699 cfs_rq_load_avg(cfs_rq) + 1); in update_cfs_rq_h_load()
7700 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
7701 cfs_rq->h_load = load; in update_cfs_rq_h_load()
7702 cfs_rq->last_h_load_update = now; in update_cfs_rq_h_load()
7708 struct cfs_rq *cfs_rq = task_cfs_rq(p); in task_h_load() local
7710 update_cfs_rq_h_load(cfs_rq); in task_h_load()
7711 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
7712 cfs_rq_load_avg(cfs_rq) + 1); in task_h_load()
7717 struct cfs_rq *cfs_rq = &rq->cfs; in __update_blocked_fair() local
7720 decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq); in __update_blocked_fair()
7721 if (cfs_rq_has_blocked(cfs_rq)) in __update_blocked_fair()
10087 struct cfs_rq *cfs_rq; in task_tick_fair() local
10091 cfs_rq = cfs_rq_of(se); in task_tick_fair()
10092 entity_tick(cfs_rq, se, queued); in task_tick_fair()
10109 struct cfs_rq *cfs_rq; in task_fork_fair() local
10117 cfs_rq = task_cfs_rq(current); in task_fork_fair()
10118 curr = cfs_rq->curr; in task_fork_fair()
10120 update_curr(cfs_rq); in task_fork_fair()
10123 place_entity(cfs_rq, se, 1); in task_fork_fair()
10134 se->vruntime -= cfs_rq->min_vruntime; in task_fork_fair()
10195 struct cfs_rq *cfs_rq; in propagate_entity_cfs_rq() local
10201 cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
10203 if (cfs_rq_throttled(cfs_rq)) in propagate_entity_cfs_rq()
10206 update_load_avg(cfs_rq, se, UPDATE_TG); in propagate_entity_cfs_rq()
10215 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_entity_cfs_rq() local
10218 update_load_avg(cfs_rq, se, 0); in detach_entity_cfs_rq()
10219 detach_entity_load_avg(cfs_rq, se); in detach_entity_cfs_rq()
10220 update_tg_load_avg(cfs_rq, false); in detach_entity_cfs_rq()
10226 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_entity_cfs_rq() local
10237 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); in attach_entity_cfs_rq()
10238 attach_entity_load_avg(cfs_rq, se, 0); in attach_entity_cfs_rq()
10239 update_tg_load_avg(cfs_rq, false); in attach_entity_cfs_rq()
10246 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_task_cfs_rq() local
10253 place_entity(cfs_rq, se, 0); in detach_task_cfs_rq()
10254 se->vruntime -= cfs_rq->min_vruntime; in detach_task_cfs_rq()
10263 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_task_cfs_rq() local
10268 se->vruntime += cfs_rq->min_vruntime; in attach_task_cfs_rq()
10313 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_next_task_fair() local
10315 set_next_entity(cfs_rq, se); in set_next_task_fair()
10317 account_cfs_rq_runtime(cfs_rq, 0); in set_next_task_fair()
10321 void init_cfs_rq(struct cfs_rq *cfs_rq) in init_cfs_rq() argument
10323 cfs_rq->tasks_timeline = RB_ROOT_CACHED; in init_cfs_rq()
10324 cfs_rq->min_vruntime = (u64)(-(1LL << 20)); in init_cfs_rq()
10326 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; in init_cfs_rq()
10329 raw_spin_lock_init(&cfs_rq->removed.lock); in init_cfs_rq()
10374 if (tg->cfs_rq) in free_fair_sched_group()
10375 kfree(tg->cfs_rq[i]); in free_fair_sched_group()
10380 kfree(tg->cfs_rq); in free_fair_sched_group()
10387 struct cfs_rq *cfs_rq; in alloc_fair_sched_group() local
10390 tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL); in alloc_fair_sched_group()
10391 if (!tg->cfs_rq) in alloc_fair_sched_group()
10402 cfs_rq = kzalloc_node(sizeof(struct cfs_rq), in alloc_fair_sched_group()
10404 if (!cfs_rq) in alloc_fair_sched_group()
10412 init_cfs_rq(cfs_rq); in alloc_fair_sched_group()
10413 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
10420 kfree(cfs_rq); in alloc_fair_sched_group()
10457 if (!tg->cfs_rq[cpu]->on_list) in unregister_fair_sched_group()
10463 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); in unregister_fair_sched_group()
10468 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, in init_tg_cfs_entry() argument
10474 cfs_rq->tg = tg; in init_tg_cfs_entry()
10475 cfs_rq->rq = rq; in init_tg_cfs_entry()
10476 init_cfs_rq_runtime(cfs_rq); in init_tg_cfs_entry()
10478 tg->cfs_rq[cpu] = cfs_rq; in init_tg_cfs_entry()
10486 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
10489 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
10493 se->my_q = cfs_rq; in init_tg_cfs_entry()
10619 struct cfs_rq *cfs_rq, *pos; in print_cfs_stats() local
10622 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) in print_cfs_stats()
10623 print_cfs_rq(m, cpu, cfs_rq); in print_cfs_stats()
10670 const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq) in sched_trace_cfs_rq_avg() argument
10673 return cfs_rq ? &cfs_rq->avg : NULL; in sched_trace_cfs_rq_avg()
10680 char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len) in sched_trace_cfs_rq_path() argument
10682 if (!cfs_rq) { in sched_trace_cfs_rq_path()
10689 cfs_rq_tg_path(cfs_rq, str, len); in sched_trace_cfs_rq_path()
10694 int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq) in sched_trace_cfs_rq_cpu() argument
10696 return cfs_rq ? cpu_of(rq_of(cfs_rq)) : -1; in sched_trace_cfs_rq_cpu()
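
The entries at source lines 530-558 above reference update_min_vruntime(), which advances cfs_rq->min_vruntime monotonically from the currently running entity and the leftmost queued entity. Below is a minimal stand-alone sketch of that logic, under stated assumptions: struct entity and struct runqueue are illustrative substitutes for the kernel's sched_entity and cfs_rq, not the real structures, and the rbtree lookup is replaced by a precomputed leftmost pointer.

    /* Simplified model of update_min_vruntime() (see source lines 530-558 above). */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    typedef uint64_t u64;

    struct entity   { u64 vruntime; bool on_rq; };
    struct runqueue { struct entity *curr; struct entity *leftmost; u64 min_vruntime; };

    /* Signed comparison tolerates u64 wraparound, as in the kernel's max_vruntime(). */
    static u64 max_vruntime(u64 max_v, u64 v)
    {
        if ((int64_t)(v - max_v) > 0)
            max_v = v;
        return max_v;
    }

    static u64 min_vruntime(u64 min_v, u64 v)
    {
        if ((int64_t)(v - min_v) < 0)
            min_v = v;
        return min_v;
    }

    /* min_vruntime only ever moves forward, tracking curr and the leftmost entity. */
    static void update_min_vruntime(struct runqueue *rq)
    {
        u64 vruntime = rq->min_vruntime;

        if (rq->curr && rq->curr->on_rq)
            vruntime = rq->curr->vruntime;

        if (rq->leftmost) {
            if (rq->curr && rq->curr->on_rq)
                vruntime = min_vruntime(vruntime, rq->leftmost->vruntime);
            else
                vruntime = rq->leftmost->vruntime;
        }

        rq->min_vruntime = max_vruntime(rq->min_vruntime, vruntime);
    }

    int main(void)
    {
        struct entity curr = { .vruntime = 1200, .on_rq = true };
        struct entity left = { .vruntime = 1000, .on_rq = true };
        struct runqueue rq = { .curr = &curr, .leftmost = &left, .min_vruntime = 900 };

        update_min_vruntime(&rq);
        /* Prints 1000: min_vruntime advances toward the leftmost queued entity
         * but never moves backward below its previous value. */
        printf("min_vruntime advanced to %llu\n", (unsigned long long)rq.min_vruntime);
        return 0;
    }

The renormalizations in enqueue_entity() and dequeue_entity() (source lines 3986 and 4110 above) add and subtract cfs_rq->min_vruntime so that an entity's vruntime stays comparable when it moves between runqueues, which is why min_vruntime is maintained as a monotonic per-cfs_rq baseline.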