Lines matching the identifier "se" (full match) in kernel/sched/fair.c

290 static inline struct task_struct *task_of(struct sched_entity *se)  in task_of()  argument
292 SCHED_WARN_ON(!entity_is_task(se)); in task_of()
293 return container_of(se, struct task_struct, se); in task_of()
297 #define for_each_sched_entity(se) \ argument
298 for (; se; se = se->parent)
302 return p->se.cfs_rq; in task_cfs_rq()
306 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of() argument
308 return se->cfs_rq; in cfs_rq_of()
430 is_same_group(struct sched_entity *se, struct sched_entity *pse) in is_same_group() argument
432 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
433 return se->cfs_rq; in is_same_group()
438 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
440 return se->parent; in parent_entity()
444 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
456 se_depth = (*se)->depth; in find_matching_se()
461 *se = parent_entity(*se); in find_matching_se()
469 while (!is_same_group(*se, *pse)) { in find_matching_se()
470 *se = parent_entity(*se); in find_matching_se()
477 static inline struct task_struct *task_of(struct sched_entity *se) in task_of() argument
479 return container_of(se, struct task_struct, se); in task_of()
482 #define for_each_sched_entity(se) \ argument
483 for (; se; se = NULL)
490 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of() argument
492 struct task_struct *p = task_of(se); in cfs_rq_of()
526 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
532 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
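The two blocks above are the CONFIG_FAIR_GROUP_SCHED and non-group builds of the basic entity helpers: task_of() recovers the owning task from an embedded sched_entity via container_of(), and for_each_sched_entity() walks from a task's entity up through its group entities (or degenerates to a single iteration when group scheduling is off). A minimal userspace sketch of the same pattern, using simplified stand-in structs rather than the kernel definitions:

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel structures. */
    struct sched_entity {
        struct sched_entity *parent;   /* NULL when group scheduling is off */
        unsigned long weight;
    };

    struct task_struct {
        int pid;
        struct sched_entity se;        /* entity embedded in the task, as in the kernel */
    };

    /* container_of(): recover the enclosing structure from a member pointer. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* task_of(): only valid for a task's own entity (no ->parent below it). */
    static struct task_struct *task_of(struct sched_entity *se)
    {
        return container_of(se, struct task_struct, se);
    }

    /* for_each_sched_entity(): walk from the task's entity up through its groups. */
    #define for_each_sched_entity(se) \
        for (; se; se = se->parent)

    int main(void)
    {
        struct sched_entity group = { .parent = NULL, .weight = 1024 };
        struct task_struct p = { .pid = 42, .se = { .parent = &group, .weight = 1024 } };

        struct sched_entity *se = &p.se;
        printf("task_of(&p.se)->pid = %d\n", task_of(se)->pid);

        int depth = 0;
        for_each_sched_entity(se)
            depth++;
        printf("entities walked (task + groups) = %d\n", depth);  /* 2 */
        return 0;
    }

The key invariant is that task_of() is only meaningful for the bottom-level entity of the walk; group entities are not embedded in a task_struct, which is why the group-scheduling build guards it with SCHED_WARN_ON(!entity_is_task(se)).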
584 struct sched_entity *se; in update_min_vruntime() local
585 se = rb_entry(leftmost, struct sched_entity, run_node); in update_min_vruntime()
588 vruntime = se->vruntime; in update_min_vruntime()
590 vruntime = min_vruntime(vruntime, se->vruntime); in update_min_vruntime()
604 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
621 if (entity_before(se, entry)) { in __enqueue_entity()
629 rb_link_node(&se->run_node, parent, link); in __enqueue_entity()
630 rb_insert_color_cached(&se->run_node, in __enqueue_entity()
634 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
636 rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline); in __dequeue_entity()
649 static struct sched_entity *__pick_next_entity(struct sched_entity *se) in __pick_next_entity() argument
651 struct rb_node *next = rb_next(&se->run_node); in __pick_next_entity()
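__enqueue_entity() and __dequeue_entity() keep the cfs_rq's runnable entities on a leftmost-cached rbtree (cfs_rq->tasks_timeline) keyed by vruntime, and __pick_next_entity() is simply the in-order successor. The ordering predicate itself is not shown in this listing; the sketch below assumes the usual entity_before() definition, a signed comparison of the u64 vruntimes so the ordering survives wraparound:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    struct sched_entity { uint64_t vruntime; };

    /* Wrap-safe "a runs before b": compare the difference as signed, so the
     * order stays correct even after vruntime overflows the u64. */
    static bool entity_before(const struct sched_entity *a,
                              const struct sched_entity *b)
    {
        return (int64_t)(a->vruntime - b->vruntime) < 0;
    }

    int main(void)
    {
        struct sched_entity a = { .vruntime = 1000 };
        struct sched_entity b = { .vruntime = 2000 };
        /* Near-overflow case: c has wrapped past 0, d has not. */
        struct sched_entity c = { .vruntime = 5 };
        struct sched_entity d = { .vruntime = UINT64_MAX - 5 };

        printf("a before b: %d\n", entity_before(&a, &b)); /* 1 */
        printf("d before c: %d\n", entity_before(&d, &c)); /* 1: d orders first despite the larger raw value */
        return 0;
    }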
700 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) in calc_delta_fair() argument
702 if (unlikely(se->load.weight != NICE_0_LOAD)) in calc_delta_fair()
703 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); in calc_delta_fair()
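calc_delta_fair() converts a wall-clock runtime delta into virtual time: at the default weight (NICE_0_LOAD) vruntime advances at wall-clock rate, heavier entities advance more slowly and lighter ones faster, which is what makes the vruntime ordering fair. A back-of-the-envelope sketch of that scaling; the weights quoted for nice -5 and nice 5 are taken from the standard sched_prio_to_weight table, and the plain division stands in for __calc_delta()'s inverse-weight fixed-point multiply:

    #include <stdint.h>
    #include <stdio.h>

    #define NICE_0_LOAD 1024ULL

    /* vruntime delta = delta_exec * NICE_0_LOAD / weight. */
    static uint64_t calc_delta_fair_sketch(uint64_t delta_exec, unsigned long weight)
    {
        if (weight == NICE_0_LOAD)
            return delta_exec;              /* fast path, as in the fragment above */
        return delta_exec * NICE_0_LOAD / weight;
    }

    int main(void)
    {
        uint64_t delta = 6000000;           /* 6ms of real runtime, in ns */

        printf("nice  0 (w=1024): +%llu ns vruntime\n",
               (unsigned long long)calc_delta_fair_sketch(delta, 1024));
        printf("nice -5 (w=3121): +%llu ns vruntime\n",
               (unsigned long long)calc_delta_fair_sketch(delta, 3121));
        printf("nice  5 (w=335):  +%llu ns vruntime\n",
               (unsigned long long)calc_delta_fair_sketch(delta, 335));
        return 0;
    }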
730 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_slice() argument
738 slice = __sched_period(nr_running + !se->on_rq); in sched_slice()
740 for_each_sched_entity(se) { in sched_slice()
744 cfs_rq = cfs_rq_of(se); in sched_slice()
747 if (unlikely(!se->on_rq)) { in sched_slice()
750 update_load_add(&lw, se->load.weight); in sched_slice()
753 slice = __calc_delta(slice, se->load.weight, load); in sched_slice()
767 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_vslice() argument
769 return calc_delta_fair(sched_slice(cfs_rq, se), se); in sched_vslice()
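sched_slice() gives an entity a share of the scheduling period proportional to its weight within its cfs_rq (the for_each_sched_entity() loop repeats this at every group level), and sched_vslice() converts that wall-clock slice back into virtual time. A flat, single-level sketch of the arithmetic; the 24ms period is an assumed figure, where the kernel derives it from sysctl_sched_latency, sysctl_sched_min_granularity and the number of runnable entities:

    #include <stdint.h>
    #include <stdio.h>

    #define NICE_0_LOAD 1024ULL

    struct entity { const char *name; unsigned long weight; };

    int main(void)
    {
        uint64_t period = 24000000;                 /* 24ms in ns, assumption */

        struct entity rq[] = {
            { "nice 0",  1024 },
            { "nice 0",  1024 },
            { "nice -5", 3121 },
        };
        unsigned long total = 0;
        for (unsigned i = 0; i < 3; i++)
            total += rq[i].weight;

        for (unsigned i = 0; i < 3; i++) {
            /* slice  = period * w_i / sum(w): wall-clock share of the period. */
            uint64_t slice  = period * rq[i].weight / total;
            /* vslice = slice * NICE_0_LOAD / w_i: the same share in vruntime. */
            uint64_t vslice = slice * NICE_0_LOAD / rq[i].weight;
            printf("%-7s slice=%9llu ns  vslice=%9llu ns\n", rq[i].name,
                   (unsigned long long)slice, (unsigned long long)vslice);
        }
        return 0;
    }

Every entity ends up with (almost) the same vslice, which is the point: each one is entitled to the same amount of virtual time per period, and only the wall-clock slice scales with weight.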
779 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
781 struct sched_avg *sa = &se->avg; in init_entity_runnable_average()
791 if (entity_is_task(se)) in init_entity_runnable_average()
792 sa->load_avg = scale_load_down(se->load.weight); in init_entity_runnable_average()
797 static void attach_entity_cfs_rq(struct sched_entity *se);
803 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
827 struct sched_entity *se = &p->se; in post_init_entity_util_avg() local
828 struct cfs_rq *cfs_rq = cfs_rq_of(se); in post_init_entity_util_avg()
829 struct sched_avg *sa = &se->avg; in post_init_entity_util_avg()
835 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; in post_init_entity_util_avg()
852 attach_entity_load_avg(cfs_rq, se); in post_init_entity_util_avg()
858 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
862 attach_entity_cfs_rq(se); in post_init_entity_util_avg()
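The comment fragment at line 803 gives the seeding rule post_init_entity_util_avg() applies to a freshly forked task: inherit a weight-proportional share of the parent cfs_rq's utilisation instead of starting from zero, then cap it so a burst of forks cannot inflate the CPU's utilisation estimate. A sketch of that arithmetic with made-up numbers; the cap being half of the remaining capacity headroom is an assumption about this kernel version, not something visible in the fragments:

    #include <stdio.h>

    int main(void)
    {
        /* Assumed sample values; in the kernel these live in cfs_rq->avg and se->load. */
        long cpu_capacity    = 1024;   /* arch_scale_cpu_capacity() of this CPU    */
        long cfs_rq_util_avg = 400;    /* current utilisation of the parent cfs_rq */
        long cfs_rq_load_avg = 2048;   /* current load of the parent cfs_rq        */
        long se_load_weight  = 1024;   /* new task at nice 0                       */

        /* util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se->load.weight:
         * a weight-proportional share of the parent's utilisation. */
        long util_avg = cfs_rq_util_avg * se_load_weight / (cfs_rq_load_avg + 1);

        /* Cap at a fraction of the remaining headroom (assumed: half of it). */
        long cap = (cpu_capacity - cfs_rq_util_avg) / 2;
        if (util_avg > cap)
            util_avg = cap;

        printf("seeded util_avg = %ld (cap was %ld)\n", util_avg, cap);
        return 0;
    }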
866 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
917 update_curr(cfs_rq_of(&rq->curr->se)); in update_curr_fair()
921 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start() argument
929 prev_wait_start = schedstat_val(se->statistics.wait_start); in update_stats_wait_start()
931 if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) && in update_stats_wait_start()
935 __schedstat_set(se->statistics.wait_start, wait_start); in update_stats_wait_start()
939 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end() argument
947 delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start); in update_stats_wait_end()
949 if (entity_is_task(se)) { in update_stats_wait_end()
950 p = task_of(se); in update_stats_wait_end()
957 __schedstat_set(se->statistics.wait_start, delta); in update_stats_wait_end()
963 __schedstat_set(se->statistics.wait_max, in update_stats_wait_end()
964 max(schedstat_val(se->statistics.wait_max), delta)); in update_stats_wait_end()
965 __schedstat_inc(se->statistics.wait_count); in update_stats_wait_end()
966 __schedstat_add(se->statistics.wait_sum, delta); in update_stats_wait_end()
967 __schedstat_set(se->statistics.wait_start, 0); in update_stats_wait_end()
971 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue_sleeper() argument
979 sleep_start = schedstat_val(se->statistics.sleep_start); in update_stats_enqueue_sleeper()
980 block_start = schedstat_val(se->statistics.block_start); in update_stats_enqueue_sleeper()
982 if (entity_is_task(se)) in update_stats_enqueue_sleeper()
983 tsk = task_of(se); in update_stats_enqueue_sleeper()
991 if (unlikely(delta > schedstat_val(se->statistics.sleep_max))) in update_stats_enqueue_sleeper()
992 __schedstat_set(se->statistics.sleep_max, delta); in update_stats_enqueue_sleeper()
994 __schedstat_set(se->statistics.sleep_start, 0); in update_stats_enqueue_sleeper()
995 __schedstat_add(se->statistics.sum_sleep_runtime, delta); in update_stats_enqueue_sleeper()
1008 if (unlikely(delta > schedstat_val(se->statistics.block_max))) in update_stats_enqueue_sleeper()
1009 __schedstat_set(se->statistics.block_max, delta); in update_stats_enqueue_sleeper()
1011 __schedstat_set(se->statistics.block_start, 0); in update_stats_enqueue_sleeper()
1012 __schedstat_add(se->statistics.sum_sleep_runtime, delta); in update_stats_enqueue_sleeper()
1016 __schedstat_add(se->statistics.iowait_sum, delta); in update_stats_enqueue_sleeper()
1017 __schedstat_inc(se->statistics.iowait_count); in update_stats_enqueue_sleeper()
1042 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_enqueue() argument
1051 if (se != cfs_rq->curr) in update_stats_enqueue()
1052 update_stats_wait_start(cfs_rq, se); in update_stats_enqueue()
1055 update_stats_enqueue_sleeper(cfs_rq, se); in update_stats_enqueue()
1059 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_dequeue() argument
1069 if (se != cfs_rq->curr) in update_stats_dequeue()
1070 update_stats_wait_end(cfs_rq, se); in update_stats_dequeue()
1072 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) { in update_stats_dequeue()
1073 struct task_struct *tsk = task_of(se); in update_stats_dequeue()
1076 __schedstat_set(se->statistics.sleep_start, in update_stats_dequeue()
1079 __schedstat_set(se->statistics.block_start, in update_stats_dequeue()
1088 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
1093 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
2283 now = p->se.exec_start; in numa_get_avg_runtime()
2284 runtime = p->se.sum_exec_runtime; in numa_get_avg_runtime()
2294 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
2758 u64 runtime = p->se.sum_exec_runtime; in task_numa_work()
2889 if (unlikely(p->se.sum_exec_runtime != runtime)) { in task_numa_work()
2890 u64 diff = p->se.sum_exec_runtime - runtime; in task_numa_work()
2959 now = curr->se.sum_exec_runtime; in task_tick_numa()
3026 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
3028 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
3030 if (entity_is_task(se)) { in account_entity_enqueue()
3033 account_numa_enqueue(rq, task_of(se)); in account_entity_enqueue()
3034 list_add(&se->group_node, &rq->cfs_tasks); in account_entity_enqueue()
3041 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
3043 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
3045 if (entity_is_task(se)) { in account_entity_dequeue()
3046 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
3047 list_del_init(&se->group_node); in account_entity_dequeue()
3103 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_load_avg() argument
3105 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
3106 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
3110 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_load_avg() argument
3112 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
3113 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
3117 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_load_avg() argument
3119 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_load_avg() argument
3122 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
3125 if (se->on_rq) { in reweight_entity()
3127 if (cfs_rq->curr == se) in reweight_entity()
3129 update_load_sub(&cfs_rq->load, se->load.weight); in reweight_entity()
3131 dequeue_load_avg(cfs_rq, se); in reweight_entity()
3133 update_load_set(&se->load, weight); in reweight_entity()
3137 u32 divider = get_pelt_divider(&se->avg); in reweight_entity()
3139 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); in reweight_entity()
3143 enqueue_load_avg(cfs_rq, se); in reweight_entity()
3144 if (se->on_rq) in reweight_entity()
3145 update_load_add(&cfs_rq->load, se->load.weight); in reweight_entity()
3151 struct sched_entity *se = &p->se; in reweight_task() local
3152 struct cfs_rq *cfs_rq = cfs_rq_of(se); in reweight_task()
3153 struct load_weight *load = &se->load; in reweight_task()
3156 reweight_entity(cfs_rq, se, weight); in reweight_task()
3276 static void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
3278 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in update_cfs_group()
3290 if (likely(se->load.weight == shares)) in update_cfs_group()
3296 reweight_entity(cfs_rq_of(se), se, shares); in update_cfs_group()
3300 static inline void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
3365 void set_task_rq_fair(struct sched_entity *se, in set_task_rq_fair() argument
3381 if (!(se->avg.last_update_time && prev)) in set_task_rq_fair()
3405 __update_load_avg_blocked_se(p_last_update_time, se); in set_task_rq_fair()
3406 se->avg.last_update_time = n_last_update_time; in set_task_rq_fair()
3477 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_util() argument
3479 long delta = gcfs_rq->avg.util_avg - se->avg.util_avg; in update_tg_cfs_util()
3487 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in update_tg_cfs_util()
3493 se->avg.util_avg = gcfs_rq->avg.util_avg; in update_tg_cfs_util()
3494 se->avg.util_sum = se->avg.util_avg * divider; in update_tg_cfs_util()
3502 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_runnable() argument
3504 long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg; in update_tg_cfs_runnable()
3512 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in update_tg_cfs_runnable()
3518 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg; in update_tg_cfs_runnable()
3519 se->avg.runnable_sum = se->avg.runnable_avg * divider; in update_tg_cfs_runnable()
3527 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_load() argument
3540 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in update_tg_cfs_load()
3550 runnable_sum += se->avg.load_sum; in update_tg_cfs_load()
3562 /* But make sure to not inflate se's runnable */ in update_tg_cfs_load()
3563 runnable_sum = min(se->avg.load_sum, load_sum); in update_tg_cfs_load()
3572 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; in update_tg_cfs_load()
3575 load_sum = (s64)se_weight(se) * runnable_sum; in update_tg_cfs_load()
3578 delta = load_avg - se->avg.load_avg; in update_tg_cfs_load()
3580 se->avg.load_sum = runnable_sum; in update_tg_cfs_load()
3581 se->avg.load_avg = load_avg; in update_tg_cfs_load()
3594 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
3598 if (entity_is_task(se)) in propagate_entity_load_avg()
3601 gcfs_rq = group_cfs_rq(se); in propagate_entity_load_avg()
3607 cfs_rq = cfs_rq_of(se); in propagate_entity_load_avg()
3611 update_tg_cfs_util(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3612 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3613 update_tg_cfs_load(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3616 trace_pelt_se_tp(se); in propagate_entity_load_avg()
3625 static inline bool skip_blocked_update(struct sched_entity *se) in skip_blocked_update() argument
3627 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in skip_blocked_update()
3633 if (se->avg.load_avg || se->avg.util_avg) in skip_blocked_update()
3655 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
3706 * Because of rounding, se->util_sum might ends up being +1 more than in update_cfs_rq_load_avg()
3745 * @se: sched_entity to attach
3750 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in attach_entity_load_avg() argument
3753 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in attach_entity_load_avg()
3759 * When we attach the @se to the @cfs_rq, we must align the decay in attach_entity_load_avg()
3765 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
3766 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
3774 se->avg.util_sum = se->avg.util_avg * divider; in attach_entity_load_avg()
3776 se->avg.runnable_sum = se->avg.runnable_avg * divider; in attach_entity_load_avg()
3778 se->avg.load_sum = se->avg.load_avg * divider; in attach_entity_load_avg()
3779 if (se_weight(se) < se->avg.load_sum) in attach_entity_load_avg()
3780 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se)); in attach_entity_load_avg()
3782 se->avg.load_sum = 1; in attach_entity_load_avg()
3784 enqueue_load_avg(cfs_rq, se); in attach_entity_load_avg()
3785 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
3786 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
3787 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; in attach_entity_load_avg()
3788 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; in attach_entity_load_avg()
3790 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
3800 * @se: sched_entity to detach
3805 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg() argument
3808 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in detach_entity_load_avg()
3813 dequeue_load_avg(cfs_rq, se); in detach_entity_load_avg()
3814 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
3816 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); in detach_entity_load_avg()
3819 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
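attach_entity_load_avg() and detach_entity_load_avg() move an entity's PELT averages into and out of a cfs_rq, and the recurring sum = avg * divider pattern in the fragments resynchronises the entity's running sums with its averages using the cfs_rq's divider, so both sides decay in lockstep afterwards. The sketch below assumes the usual get_pelt_divider() definition, LOAD_AVG_MAX - 1024 + period_contrib with LOAD_AVG_MAX = 47742 (the maximum the PELT geometric series can accumulate); neither constant appears in this listing:

    #include <stdint.h>
    #include <stdio.h>

    #define LOAD_AVG_MAX 47742ULL   /* assumed max value of the PELT series */

    struct sched_avg {
        uint32_t period_contrib;    /* partial 1024us period already accumulated */
        unsigned long util_avg, runnable_avg;
        uint64_t util_sum, runnable_sum;
    };

    /* Same idea as get_pelt_divider(): the sum that corresponds to avg == 1. */
    static uint64_t pelt_divider(const struct sched_avg *avg)
    {
        return LOAD_AVG_MAX - 1024 + avg->period_contrib;
    }

    /* On attach, rebuild the entity's *_sum fields from its *_avg fields using
     * the cfs_rq's divider, so entity and cfs_rq stay consistent from now on. */
    static void attach_resync(struct sched_avg *se_avg, const struct sched_avg *cfs_avg)
    {
        uint64_t divider = pelt_divider(cfs_avg);

        se_avg->period_contrib = cfs_avg->period_contrib;
        se_avg->util_sum       = se_avg->util_avg * divider;
        se_avg->runnable_sum   = se_avg->runnable_avg * divider;
    }

    int main(void)
    {
        struct sched_avg cfs = { .period_contrib = 512 };
        struct sched_avg se  = { .util_avg = 100, .runnable_avg = 300 };

        attach_resync(&se, &cfs);
        printf("divider=%llu util_sum=%llu runnable_sum=%llu\n",
               (unsigned long long)pelt_divider(&cfs),
               (unsigned long long)se.util_sum,
               (unsigned long long)se.runnable_sum);
        return 0;
    }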
3834 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_load_avg() argument
3843 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) in update_load_avg()
3844 __update_load_avg_se(now, cfs_rq, se); in update_load_avg()
3847 decayed |= propagate_entity_load_avg(se); in update_load_avg()
3849 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { in update_load_avg()
3858 attach_entity_load_avg(cfs_rq, se); in update_load_avg()
3894 static void sync_entity_load_avg(struct sched_entity *se) in sync_entity_load_avg() argument
3896 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sync_entity_load_avg()
3900 __update_load_avg_blocked_se(last_update_time, se); in sync_entity_load_avg()
3907 static void remove_entity_load_avg(struct sched_entity *se) in remove_entity_load_avg() argument
3909 struct cfs_rq *cfs_rq = cfs_rq_of(se); in remove_entity_load_avg()
3918 sync_entity_load_avg(se); in remove_entity_load_avg()
3922 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
3923 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
3924 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; in remove_entity_load_avg()
3946 return READ_ONCE(p->se.avg.util_avg); in task_util()
3951 struct util_est ue = READ_ONCE(p->se.avg.util_est); in _task_util_est()
4061 ue = p->se.avg.util_est; in util_est_update()
4121 WRITE_ONCE(p->se.avg.util_est, ue); in util_est_update()
4123 trace_sched_util_est_se_tp(&p->se); in util_est_update()
4318 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) in update_load_avg() argument
4323 static inline void remove_entity_load_avg(struct sched_entity *se) {} in remove_entity_load_avg() argument
4326 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in attach_entity_load_avg() argument
4328 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in detach_entity_load_avg() argument
4348 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) in check_spread() argument
4351 s64 d = se->vruntime - cfs_rq->min_vruntime; in check_spread()
4361 static inline bool entity_is_long_sleeper(struct sched_entity *se) in entity_is_long_sleeper() argument
4366 if (se->exec_start == 0) in entity_is_long_sleeper()
4369 cfs_rq = cfs_rq_of(se); in entity_is_long_sleeper()
4374 if (sleep_time <= se->exec_start) in entity_is_long_sleeper()
4377 sleep_time -= se->exec_start; in entity_is_long_sleeper()
4385 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) in place_entity() argument
4396 vruntime += sched_vslice(cfs_rq, se); in place_entity()
4431 if (entity_is_long_sleeper(se)) in place_entity()
4432 se->vruntime = vruntime; in place_entity()
4434 se->vruntime = max_vruntime(se->vruntime, vruntime); in place_entity()
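place_entity() decides where an entity lands on the virtual timeline when it is (re)enqueued: a freshly forked task is pushed one vslice ahead of min_vruntime (line 4396), a long sleeper is snapped to the baseline, and everything else keeps max(own vruntime, baseline) so it can never move backwards (line 4434). The sketch below also applies the conventional sleeper credit of half a scheduling latency for ordinary wakeups; that part is not visible in the fragments, and the 6ms latency is an assumed default:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    #define SCHED_LATENCY_NS 6000000ULL     /* 6ms, assumed tunable value */

    static uint64_t max_vruntime(uint64_t max_vr, uint64_t vr)
    {
        /* signed comparison so the max is wraparound-safe */
        if ((int64_t)(vr - max_vr) > 0)
            max_vr = vr;
        return max_vr;
    }

    /* Sketch of place_entity(): "initial" marks a freshly forked task, otherwise
     * a wakeup; vslice stands in for sched_vslice(), long_sleeper for
     * entity_is_long_sleeper(). */
    static uint64_t place_entity_sketch(uint64_t min_vruntime, uint64_t se_vruntime,
                                        uint64_t vslice, bool initial, bool long_sleeper)
    {
        uint64_t vruntime = min_vruntime;

        if (initial)
            vruntime += vslice;                 /* start one vslice behind the pack */
        else
            vruntime -= SCHED_LATENCY_NS / 2;   /* gentle sleeper credit (assumed) */

        /* Long sleepers are snapped to the base; otherwise never move backwards. */
        return long_sleeper ? vruntime : max_vruntime(se_vruntime, vruntime);
    }

    int main(void)
    {
        uint64_t min_vr = 100000000;            /* cfs_rq->min_vruntime */

        printf("forked task:   %llu\n",
               (unsigned long long)place_entity_sketch(min_vr, 0, 3000000, true, false));
        printf("woken sleeper: %llu\n",
               (unsigned long long)place_entity_sketch(min_vr, 90000000, 3000000, false, false));
        return 0;
    }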
4492 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
4495 bool curr = cfs_rq->curr == se; in enqueue_entity()
4502 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
4513 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
4523 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); in enqueue_entity()
4524 se_update_runnable(se); in enqueue_entity()
4525 update_cfs_group(se); in enqueue_entity()
4526 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
4529 place_entity(cfs_rq, se, 0); in enqueue_entity()
4532 se->exec_start = 0; in enqueue_entity()
4535 update_stats_enqueue(cfs_rq, se, flags); in enqueue_entity()
4536 check_spread(cfs_rq, se); in enqueue_entity()
4538 __enqueue_entity(cfs_rq, se); in enqueue_entity()
4539 se->on_rq = 1; in enqueue_entity()
4553 static void __clear_buddies_last(struct sched_entity *se) in __clear_buddies_last() argument
4555 for_each_sched_entity(se) { in __clear_buddies_last()
4556 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_last()
4557 if (cfs_rq->last != se) in __clear_buddies_last()
4564 static void __clear_buddies_next(struct sched_entity *se) in __clear_buddies_next() argument
4566 for_each_sched_entity(se) { in __clear_buddies_next()
4567 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next()
4568 if (cfs_rq->next != se) in __clear_buddies_next()
4575 static void __clear_buddies_skip(struct sched_entity *se) in __clear_buddies_skip() argument
4577 for_each_sched_entity(se) { in __clear_buddies_skip()
4578 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_skip()
4579 if (cfs_rq->skip != se) in __clear_buddies_skip()
4586 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
4588 if (cfs_rq->last == se) in clear_buddies()
4589 __clear_buddies_last(se); in clear_buddies()
4591 if (cfs_rq->next == se) in clear_buddies()
4592 __clear_buddies_next(se); in clear_buddies()
4594 if (cfs_rq->skip == se) in clear_buddies()
4595 __clear_buddies_skip(se); in clear_buddies()
4601 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
4616 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_entity()
4617 se_update_runnable(se); in dequeue_entity()
4619 update_stats_dequeue(cfs_rq, se, flags); in dequeue_entity()
4621 clear_buddies(cfs_rq, se); in dequeue_entity()
4623 if (se != cfs_rq->curr) in dequeue_entity()
4624 __dequeue_entity(cfs_rq, se); in dequeue_entity()
4625 se->on_rq = 0; in dequeue_entity()
4626 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
4630 * min_vruntime if @se is the one holding it back. But before doing in dequeue_entity()
4631 * update_min_vruntime() again, which will discount @se's position and in dequeue_entity()
4635 se->vruntime -= cfs_rq->min_vruntime; in dequeue_entity()
4640 update_cfs_group(se); in dequeue_entity()
4643 * Now advance min_vruntime if @se was the entity holding it back, in dequeue_entity()
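enqueue_entity() adds cfs_rq->min_vruntime into se->vruntime (lines 4502/4513) and dequeue_entity() subtracts it again on a sleeping dequeue (line 4635): while an entity is off a queue, or in flight between CPUs, its vruntime is stored relative to the queue it left, so only its lead or lag is preserved and it can be rebased onto the destination queue's baseline. A sketch of that rebase across a migration:

    #include <stdint.h>
    #include <stdio.h>

    struct rq_sketch { uint64_t min_vruntime; };

    /* dequeue: make the entity's vruntime relative to the queue it is leaving. */
    static uint64_t dequeue_normalize(uint64_t vruntime, const struct rq_sketch *src)
    {
        return vruntime - src->min_vruntime;
    }

    /* enqueue: rebase the relative vruntime onto the destination queue. */
    static uint64_t enqueue_renormalize(uint64_t rel_vruntime, const struct rq_sketch *dst)
    {
        return rel_vruntime + dst->min_vruntime;
    }

    int main(void)
    {
        struct rq_sketch cpu0 = { .min_vruntime = 500000000 };
        struct rq_sketch cpu1 = { .min_vruntime =  20000000 };

        uint64_t vr = 503000000;                      /* 3ms ahead of cpu0's baseline */
        uint64_t rel = dequeue_normalize(vr, &cpu0);  /* 3000000: only the lead survives */
        uint64_t on_cpu1 = enqueue_renormalize(rel, &cpu1);

        printf("relative lead %llu ns, vruntime on cpu1 = %llu\n",
               (unsigned long long)rel, (unsigned long long)on_cpu1);
        return 0;
    }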
4659 struct sched_entity *se; in check_preempt_tick() local
4682 se = __pick_first_entity(cfs_rq); in check_preempt_tick()
4683 delta = curr->vruntime - se->vruntime; in check_preempt_tick()
4693 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
4696 if (se->on_rq) { in set_next_entity()
4702 update_stats_wait_end(cfs_rq, se); in set_next_entity()
4703 __dequeue_entity(cfs_rq, se); in set_next_entity()
4704 update_load_avg(cfs_rq, se, UPDATE_TG); in set_next_entity()
4707 update_stats_curr_start(cfs_rq, se); in set_next_entity()
4708 cfs_rq->curr = se; in set_next_entity()
4716 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { in set_next_entity()
4717 schedstat_set(se->statistics.slice_max, in set_next_entity()
4718 max((u64)schedstat_val(se->statistics.slice_max), in set_next_entity()
4719 se->sum_exec_runtime - se->prev_sum_exec_runtime)); in set_next_entity()
4722 se->prev_sum_exec_runtime = se->sum_exec_runtime; in set_next_entity()
4726 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
4739 struct sched_entity *se; in pick_next_entity() local
4748 se = left; /* ideally we run the leftmost entity */ in pick_next_entity()
4754 if (cfs_rq->skip == se) { in pick_next_entity()
4757 if (se == curr) { in pick_next_entity()
4760 second = __pick_next_entity(se); in pick_next_entity()
4766 se = second; in pick_next_entity()
4773 se = cfs_rq->next; in pick_next_entity()
4778 se = cfs_rq->last; in pick_next_entity()
4781 clear_buddies(cfs_rq, se); in pick_next_entity()
4783 return se; in pick_next_entity()
5046 struct sched_entity *se; in throttle_cfs_rq() local
5070 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
5079 for_each_sched_entity(se) { in throttle_cfs_rq()
5080 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
5082 if (!se->on_rq) in throttle_cfs_rq()
5086 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); in throttle_cfs_rq()
5088 update_load_avg(qcfs_rq, se, 0); in throttle_cfs_rq()
5089 se_update_runnable(se); in throttle_cfs_rq()
5100 if (!se) { in throttle_cfs_rq()
5118 struct sched_entity *se; in unthrottle_cfs_rq() local
5122 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
5141 for_each_sched_entity(se) { in unthrottle_cfs_rq()
5142 if (se->on_rq) in unthrottle_cfs_rq()
5144 cfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
5145 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); in unthrottle_cfs_rq()
5156 for_each_sched_entity(se) { in unthrottle_cfs_rq()
5157 cfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
5159 update_load_avg(cfs_rq, se, UPDATE_TG); in unthrottle_cfs_rq()
5160 se_update_runnable(se); in unthrottle_cfs_rq()
5178 /* At this point se is NULL and we are at root level*/ in unthrottle_cfs_rq()
5188 for_each_sched_entity(se) { in unthrottle_cfs_rq()
5189 cfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
5699 struct sched_entity *se = &p->se; in hrtick_start_fair() local
5700 struct cfs_rq *cfs_rq = cfs_rq_of(se); in hrtick_start_fair()
5705 u64 slice = sched_slice(cfs_rq, se); in hrtick_start_fair()
5706 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; in hrtick_start_fair()
5730 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) in hrtick_update()
5778 static void set_next_buddy(struct sched_entity *se);
5781 static void check_preempt_from_idle(struct cfs_rq *cfs, struct sched_entity *se) in check_preempt_from_idle() argument
5785 if (se->latency_weight <= 0) in check_preempt_from_idle()
5804 if (next && wakeup_preempt_entity(next, se) == 1) in check_preempt_from_idle()
5805 set_next_buddy(se); in check_preempt_from_idle()
5818 struct sched_entity *se = &p->se; in enqueue_task_fair() local
5838 for_each_sched_entity(se) { in enqueue_task_fair()
5839 if (se->on_rq) in enqueue_task_fair()
5841 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5842 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
5855 for_each_sched_entity(se) { in enqueue_task_fair()
5856 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5858 update_load_avg(cfs_rq, se, UPDATE_TG); in enqueue_task_fair()
5859 se_update_runnable(se); in enqueue_task_fair()
5860 update_cfs_group(se); in enqueue_task_fair()
5878 /* At this point se is NULL and we are at root level*/ in enqueue_task_fair()
5900 check_preempt_from_idle(cfs_rq_of(&p->se), &p->se); in enqueue_task_fair()
5911 for_each_sched_entity(se) { in enqueue_task_fair()
5912 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5932 struct sched_entity *se = &p->se; in dequeue_task_fair() local
5939 for_each_sched_entity(se) { in dequeue_task_fair()
5940 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
5941 dequeue_entity(cfs_rq, se, flags); in dequeue_task_fair()
5954 se = parent_entity(se); in dequeue_task_fair()
5959 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) in dequeue_task_fair()
5960 set_next_buddy(se); in dequeue_task_fair()
5966 for_each_sched_entity(se) { in dequeue_task_fair()
5967 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
5969 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_task_fair()
5970 se_update_runnable(se); in dequeue_task_fair()
5971 update_cfs_group(se); in dequeue_task_fair()
5983 /* At this point se is NULL and we are at root level*/ in dequeue_task_fair()
6038 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
6061 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
6068 lsub_positive(&runnable, p->se.avg.runnable_avg); in cpu_runnable_without()
6210 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); in wake_affine()
6215 schedstat_inc(p->se.statistics.nr_wakeups_affine); in wake_affine()
6294 sync_entity_load_avg(&p->se); in find_idlest_cpu()
6587 sync_entity_load_avg(&p->se); in select_idle_sibling()
6769 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
7019 sync_entity_load_avg(&p->se); in find_energy_efficient_cpu()
7194 static void detach_entity_cfs_rq(struct sched_entity *se);
7210 struct sched_entity *se = &p->se; in migrate_task_rq_fair() local
7211 struct cfs_rq *cfs_rq = cfs_rq_of(se); in migrate_task_rq_fair()
7226 se->vruntime -= min_vruntime; in migrate_task_rq_fair()
7235 detach_entity_cfs_rq(&p->se); in migrate_task_rq_fair()
7246 remove_entity_load_avg(&p->se); in migrate_task_rq_fair()
7250 p->se.avg.last_update_time = 0; in migrate_task_rq_fair()
7257 remove_entity_load_avg(&p->se); in task_dead_fair()
7271 static long wakeup_latency_gran(struct sched_entity *curr, struct sched_entity *se) in wakeup_latency_gran() argument
7273 int latency_weight = se->latency_weight; in wakeup_latency_gran()
7280 * delay is acceptable by se. in wakeup_latency_gran()
7282 if ((se->latency_weight > 0) || (curr->latency_weight > 0)) in wakeup_latency_gran()
7303 static unsigned long wakeup_gran(struct sched_entity *se) in wakeup_gran() argument
7311 * By using 'se' instead of 'curr' we penalize light tasks, so in wakeup_gran()
7312 * they get preempted easier. That is, if 'se' < 'curr' then in wakeup_gran()
7314 * lighter, if otoh 'se' > 'curr' then the resulting gran will in wakeup_gran()
7320 return calc_delta_fair(gran, se); in wakeup_gran()
7324 * Should 'se' preempt 'curr'.
7338 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) in wakeup_preempt_entity() argument
7340 s64 gran, vdiff = curr->vruntime - se->vruntime; in wakeup_preempt_entity()
7344 vdiff += wakeup_latency_gran(curr, se); in wakeup_preempt_entity()
7350 gran = wakeup_gran(se); in wakeup_preempt_entity()
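wakeup_preempt_entity() is the three-way wakeup decision: the wakee preempts only if its vruntime trails current's by more than a granularity, and wakeup_gran() expresses that granularity in the wakee's own virtual time so light tasks are easier to preempt away from (the comments at lines 7311-7314). This tree additionally nudges vdiff with wakeup_latency_gran(); the sketch below leaves that out and assumes a 1ms base granularity:

    #include <stdint.h>
    #include <stdio.h>

    #define NICE_0_LOAD           1024ULL
    #define WAKEUP_GRANULARITY_NS 1000000ULL   /* 1ms base granularity, assumption */

    /* gran is expressed in the wakee's virtual time, like calc_delta_fair(). */
    static uint64_t wakeup_gran_sketch(unsigned long se_weight)
    {
        return WAKEUP_GRANULARITY_NS * NICE_0_LOAD / se_weight;
    }

    /* -1: curr keeps running comfortably, 0: se trails but within the
     *  granularity, 1: se trails by more than the granularity -> preempt. */
    static int wakeup_preempt_entity_sketch(uint64_t curr_vruntime,
                                            uint64_t se_vruntime,
                                            unsigned long se_weight)
    {
        int64_t vdiff = (int64_t)(curr_vruntime - se_vruntime);

        if (vdiff <= 0)
            return -1;
        if ((uint64_t)vdiff > wakeup_gran_sketch(se_weight))
            return 1;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", wakeup_preempt_entity_sketch(10000000,  9800000, 1024)); /* 0  */
        printf("%d\n", wakeup_preempt_entity_sketch(10000000,  5000000, 1024)); /* 1  */
        printf("%d\n", wakeup_preempt_entity_sketch(10000000, 12000000, 1024)); /* -1 */
        return 0;
    }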
7357 static void set_last_buddy(struct sched_entity *se) in set_last_buddy() argument
7359 if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) in set_last_buddy()
7362 for_each_sched_entity(se) { in set_last_buddy()
7363 if (SCHED_WARN_ON(!se->on_rq)) in set_last_buddy()
7365 cfs_rq_of(se)->last = se; in set_last_buddy()
7369 static void set_next_buddy(struct sched_entity *se) in set_next_buddy() argument
7371 if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) in set_next_buddy()
7374 for_each_sched_entity(se) { in set_next_buddy()
7375 if (SCHED_WARN_ON(!se->on_rq)) in set_next_buddy()
7377 cfs_rq_of(se)->next = se; in set_next_buddy()
7381 static void set_skip_buddy(struct sched_entity *se) in set_skip_buddy() argument
7383 for_each_sched_entity(se) in set_skip_buddy()
7384 cfs_rq_of(se)->skip = se; in set_skip_buddy()
7393 struct sched_entity *se = &curr->se, *pse = &p->se; in check_preempt_wakeup() local
7398 if (unlikely(se == pse)) in check_preempt_wakeup()
7440 find_matching_se(&se, &pse); in check_preempt_wakeup()
7441 update_curr(cfs_rq_of(se)); in check_preempt_wakeup()
7443 if (wakeup_preempt_entity(se, pse) == 1) { in check_preempt_wakeup()
7466 if (unlikely(!se->on_rq || curr == rq->idle)) in check_preempt_wakeup()
7469 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) in check_preempt_wakeup()
7470 set_last_buddy(se); in check_preempt_wakeup()
7477 struct sched_entity *se; in pick_next_task_fair() local
7528 se = pick_next_entity(cfs_rq, curr); in pick_next_task_fair()
7529 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
7532 p = task_of(se); in pick_next_task_fair()
7540 struct sched_entity *pse = &prev->se; in pick_next_task_fair()
7542 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
7543 int se_depth = se->depth; in pick_next_task_fair()
7551 set_next_entity(cfs_rq_of(se), se); in pick_next_task_fair()
7552 se = parent_entity(se); in pick_next_task_fair()
7557 set_next_entity(cfs_rq, se); in pick_next_task_fair()
7567 se = pick_next_entity(cfs_rq, NULL); in pick_next_task_fair()
7568 set_next_entity(cfs_rq, se); in pick_next_task_fair()
7569 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
7572 p = task_of(se); in pick_next_task_fair()
7581 list_move(&p->se.group_node, &rq->cfs_tasks); in pick_next_task_fair()
7627 struct sched_entity *se = &prev->se; in put_prev_task_fair() local
7630 for_each_sched_entity(se) { in put_prev_task_fair()
7631 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
7632 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
7645 struct sched_entity *se = &curr->se; in yield_task_fair() local
7653 clear_buddies(cfs_rq, se); in yield_task_fair()
7669 set_skip_buddy(se); in yield_task_fair()
7674 struct sched_entity *se = &p->se; in yield_to_task_fair() local
7677 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) in yield_to_task_fair()
7681 set_next_buddy(se); in yield_to_task_fair()
7914 (&p->se == cfs_rq_of(&p->se)->next || in task_hot()
7915 &p->se == cfs_rq_of(&p->se)->last)) in task_hot()
7923 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
8015 schedstat_inc(p->se.statistics.nr_failed_migrations_affine); in can_migrate_task()
8053 schedstat_inc(p->se.statistics.nr_failed_migrations_running); in can_migrate_task()
8071 schedstat_inc(p->se.statistics.nr_forced_migrations); in can_migrate_task()
8076 schedstat_inc(p->se.statistics.nr_failed_migrations_hot); in can_migrate_task()
8112 &env->src_rq->cfs_tasks, se.group_node) { in detach_one_task()
8167 p = list_last_entry(tasks, struct task_struct, se.group_node); in detach_tasks()
8234 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
8257 list_move(&p->se.group_node, tasks); in detach_tasks()
8319 p = list_first_entry(tasks, struct task_struct, se.group_node); in attach_tasks()
8320 list_del_init(&p->se.group_node); in attach_tasks()
8428 struct sched_entity *se; in __update_blocked_fair() local
8438 se = cfs_rq->tg->se[cpu]; in __update_blocked_fair()
8439 if (se && !skip_blocked_update(se)) in __update_blocked_fair()
8440 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); in __update_blocked_fair()
8465 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load() local
8473 for_each_sched_entity(se) { in update_cfs_rq_h_load()
8474 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
8475 WRITE_ONCE(cfs_rq->h_load_next, se); in update_cfs_rq_h_load()
8480 if (!se) { in update_cfs_rq_h_load()
8485 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { in update_cfs_rq_h_load()
8487 load = div64_ul(load * se->avg.load_avg, in update_cfs_rq_h_load()
8489 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
8500 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
8518 return p->se.avg.load_avg; in task_h_load()
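task_h_load() folds a task's load through the group hierarchy: update_cfs_rq_h_load() walks down from the root, multiplying by each group entity's load_avg and dividing by the parent cfs_rq's total load (lines 8485-8489), and with group scheduling disabled the whole thing collapses to p->se.avg.load_avg (line 8518). A two-level worked sketch, assuming the mainline "+ 1" in each divisor to avoid division by zero:

    #include <stdint.h>
    #include <stdio.h>

    /* One level of the hierarchy: the group entity's load_avg as seen by its
     * parent, and the total load_avg of the cfs_rq that group owns. */
    struct level { unsigned long group_se_load_avg; unsigned long cfs_rq_load_avg; };

    int main(void)
    {
        /* Root cfs_rq load, one nested task group, then the task. Illustrative
         * numbers only. */
        unsigned long root_load_avg = 4096;
        struct level group = { .group_se_load_avg = 1024, .cfs_rq_load_avg = 2048 };
        unsigned long task_load_avg = 512;

        /* h_load of the root is just its own load; each level below scales it
         * by (group entity load) / (parent cfs_rq load + 1). */
        uint64_t h_load = root_load_avg;
        h_load = h_load * group.group_se_load_avg / (root_load_avg + 1);

        /* Finally, the task's share of its own cfs_rq's h_load. */
        uint64_t task_h_load = task_load_avg * h_load / (group.cfs_rq_load_avg + 1);

        printf("group h_load = %llu, task_h_load = %llu\n",
               (unsigned long long)h_load, (unsigned long long)task_h_load);
        return 0;
    }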
9211 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
11511 struct sched_entity *se = &curr->se; in task_tick_fair() local
11513 for_each_sched_entity(se) { in task_tick_fair()
11514 cfs_rq = cfs_rq_of(se); in task_tick_fair()
11515 entity_tick(cfs_rq, se, queued); in task_tick_fair()
11533 struct sched_entity *se = &p->se, *curr; in task_fork_fair() local
11544 se->vruntime = curr->vruntime; in task_fork_fair()
11546 place_entity(cfs_rq, se, 1); in task_fork_fair()
11548 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { in task_fork_fair()
11553 swap(curr->vruntime, se->vruntime); in task_fork_fair()
11557 se->vruntime -= cfs_rq->min_vruntime; in task_fork_fair()
11588 struct sched_entity *se = &p->se; in vruntime_normalized() local
11607 if (!se->sum_exec_runtime || in vruntime_normalized()
11619 static void propagate_entity_cfs_rq(struct sched_entity *se) in propagate_entity_cfs_rq() argument
11623 list_add_leaf_cfs_rq(cfs_rq_of(se)); in propagate_entity_cfs_rq()
11626 se = se->parent; in propagate_entity_cfs_rq()
11628 for_each_sched_entity(se) { in propagate_entity_cfs_rq()
11629 cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
11632 update_load_avg(cfs_rq, se, UPDATE_TG); in propagate_entity_cfs_rq()
11642 static void propagate_entity_cfs_rq(struct sched_entity *se) { } in propagate_entity_cfs_rq() argument
11645 static void detach_entity_cfs_rq(struct sched_entity *se) in detach_entity_cfs_rq() argument
11647 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_entity_cfs_rq()
11650 update_load_avg(cfs_rq, se, 0); in detach_entity_cfs_rq()
11651 detach_entity_load_avg(cfs_rq, se); in detach_entity_cfs_rq()
11653 propagate_entity_cfs_rq(se); in detach_entity_cfs_rq()
11656 static void attach_entity_cfs_rq(struct sched_entity *se) in attach_entity_cfs_rq() argument
11658 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_entity_cfs_rq()
11665 se->depth = se->parent ? se->parent->depth + 1 : 0; in attach_entity_cfs_rq()
11669 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); in attach_entity_cfs_rq()
11670 attach_entity_load_avg(cfs_rq, se); in attach_entity_cfs_rq()
11672 propagate_entity_cfs_rq(se); in attach_entity_cfs_rq()
11677 struct sched_entity *se = &p->se; in detach_task_cfs_rq() local
11678 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_task_cfs_rq()
11685 place_entity(cfs_rq, se, 0); in detach_task_cfs_rq()
11686 se->vruntime -= cfs_rq->min_vruntime; in detach_task_cfs_rq()
11689 detach_entity_cfs_rq(se); in detach_task_cfs_rq()
11694 struct sched_entity *se = &p->se; in attach_task_cfs_rq() local
11695 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_task_cfs_rq()
11697 attach_entity_cfs_rq(se); in attach_task_cfs_rq()
11700 se->vruntime += cfs_rq->min_vruntime; in attach_task_cfs_rq()
11732 struct sched_entity *se = &p->se; in set_next_task_fair() local
11740 list_move(&se->group_node, &rq->cfs_tasks); in set_next_task_fair()
11744 for_each_sched_entity(se) { in set_next_task_fair()
11745 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_next_task_fair()
11747 set_next_entity(cfs_rq, se); in set_next_task_fair()
11768 struct sched_entity *se = &p->se; in task_set_group_fair() local
11771 se->depth = se->parent ? se->parent->depth + 1 : 0; in task_set_group_fair()
11780 /* Tell se's cfs_rq has been changed -- migrated */ in task_move_group_fair()
11781 p->se.avg.last_update_time = 0; in task_move_group_fair()
11808 if (tg->se) in free_fair_sched_group()
11809 kfree(tg->se[i]); in free_fair_sched_group()
11813 kfree(tg->se); in free_fair_sched_group()
11818 struct sched_entity *se; in alloc_fair_sched_group() local
11825 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL); in alloc_fair_sched_group()
11826 if (!tg->se) in alloc_fair_sched_group()
11839 se = kzalloc_node(sizeof(struct sched_entity), in alloc_fair_sched_group()
11841 if (!se) in alloc_fair_sched_group()
11845 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
11846 init_entity_runnable_average(se); in alloc_fair_sched_group()
11859 struct sched_entity *se; in online_fair_sched_group() local
11866 se = tg->se[i]; in online_fair_sched_group()
11869 attach_entity_cfs_rq(se); in online_fair_sched_group()
11882 if (tg->se[cpu]) in unregister_fair_sched_group()
11883 remove_entity_load_avg(tg->se[cpu]); in unregister_fair_sched_group()
11901 struct sched_entity *se, int cpu, in init_tg_cfs_entry() argument
11911 tg->se[cpu] = se; in init_tg_cfs_entry()
11913 /* se could be NULL for root_task_group */ in init_tg_cfs_entry()
11914 if (!se) in init_tg_cfs_entry()
11918 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
11919 se->depth = 0; in init_tg_cfs_entry()
11921 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
11922 se->depth = parent->depth + 1; in init_tg_cfs_entry()
11925 se->my_q = cfs_rq; in init_tg_cfs_entry()
11927 update_load_set(&se->load, NICE_0_LOAD); in init_tg_cfs_entry()
11928 se->parent = parent; in init_tg_cfs_entry()
11940 if (!tg->se[0]) in sched_group_set_shares()
11952 struct sched_entity *se = tg->se[i]; in sched_group_set_shares() local
11958 for_each_sched_entity(se) { in sched_group_set_shares()
11959 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); in sched_group_set_shares()
11960 update_cfs_group(se); in sched_group_set_shares()
11987 struct sched_entity *se = &task->se; in get_rr_interval_fair() local
11995 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se)); in get_rr_interval_fair()
12162 struct sched_entity *se = &p->se; in walt_fixup_sched_stats_fair() local
12166 for_each_sched_entity(se) { in walt_fixup_sched_stats_fair()
12167 cfs_rq = cfs_rq_of(se); in walt_fixup_sched_stats_fair()
12176 if (!se) { in walt_fixup_sched_stats_fair()