Lines matching refs: se
352 #define for_each_sched_entity(se) \ argument
353 for (; se; se = se->parent)
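With CONFIG_FAIR_GROUP_SCHED, the macro above walks a sched_entity up its ->parent chain to the root of the group hierarchy; the !CONFIG_FAIR_GROUP_SCHED variant listed further down runs the loop body exactly once. A minimal userspace sketch of the same walk pattern, using an illustrative struct entity rather than the kernel's struct sched_entity:

#include <stdio.h>

/* Illustrative stand-in for struct sched_entity; only the parent link matters here. */
struct entity {
    const char *name;
    struct entity *parent;
};

/* Same shape as the kernel macro: start at se and follow ->parent until NULL. */
#define for_each_entity(se) \
    for (; (se); (se) = (se)->parent)

int main(void)
{
    struct entity root  = { "root group",  NULL   };
    struct entity group = { "child group", &root  };
    struct entity task  = { "task",        &group };
    struct entity *se = &task;

    for_each_entity(se)
        printf("visiting %s\n", se->name);
    return 0;
}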
455 is_same_group(struct sched_entity *se, struct sched_entity *pse) in is_same_group() argument
457 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
458 return se->cfs_rq; in is_same_group()
463 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
465 return se->parent; in parent_entity()
469 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
481 se_depth = (*se)->depth; in find_matching_se()
486 *se = parent_entity(*se); in find_matching_se()
494 while (!is_same_group(*se, *pse)) { in find_matching_se()
495 *se = parent_entity(*se); in find_matching_se()
510 static int se_is_idle(struct sched_entity *se) in se_is_idle() argument
512 if (entity_is_task(se)) in se_is_idle()
513 return task_has_idle_policy(task_of(se)); in se_is_idle()
514 return cfs_rq_is_idle(group_cfs_rq(se)); in se_is_idle()
519 #define for_each_sched_entity(se) \ argument
520 for (; se; se = NULL)
538 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
544 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
558 static int se_is_idle(struct sched_entity *se) in se_is_idle() argument
614 struct sched_entity *se = __node_2_se(leftmost); in update_min_vruntime() local
617 vruntime = se->vruntime; in update_min_vruntime()
619 vruntime = min_vruntime(vruntime, se->vruntime); in update_min_vruntime()
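update_min_vruntime() takes the smaller of curr's and the leftmost entity's vruntime (when both are available), but the queue's min_vruntime itself only ever advances, so placement relative to it can never run backwards. A small sketch of that monotonic update with plain 64-bit values; the kernel's max_vruntime()/min_vruntime() helpers additionally handle wrap-around, which is ignored here:

#include <stdint.h>
#include <stdio.h>

/* Monotonic min_vruntime update: advance to the new floor, never retreat. */
static uint64_t update_min_vruntime_sketch(uint64_t min_vruntime,
                                           uint64_t curr_vruntime,
                                           uint64_t leftmost_vruntime)
{
    uint64_t vruntime = curr_vruntime < leftmost_vruntime ?
                        curr_vruntime : leftmost_vruntime;

    return vruntime > min_vruntime ? vruntime : min_vruntime;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)
           update_min_vruntime_sketch(1000, 1200, 1100));  /* advances to 1100 */
    printf("%llu\n", (unsigned long long)
           update_min_vruntime_sketch(1000, 900, 950));    /* stays at 1000 */
    return 0;
}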
635 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
637 trace_android_rvh_enqueue_entity(cfs_rq, se); in __enqueue_entity()
638 rb_add_cached(&se->run_node, &cfs_rq->tasks_timeline, __entity_less); in __enqueue_entity()
641 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
643 trace_android_rvh_dequeue_entity(cfs_rq, se); in __dequeue_entity()
644 rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline); in __dequeue_entity()
657 static struct sched_entity *__pick_next_entity(struct sched_entity *se) in __pick_next_entity() argument
659 struct rb_node *next = rb_next(&se->run_node); in __pick_next_entity()
703 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) in calc_delta_fair() argument
705 if (unlikely(se->load.weight != NICE_0_LOAD)) in calc_delta_fair()
706 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); in calc_delta_fair()
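calc_delta_fair() converts an execution delta into virtual time by scaling it with NICE_0_LOAD / weight, and the fast path above skips the scaling when the weight is exactly NICE_0_LOAD. The kernel's __calc_delta() avoids the division by using precomputed inverse weights; the sketch below uses plain 64-bit arithmetic and the unscaled nice-0 weight of 1024 to show the same relation (the nice -5 and +5 weights are taken from the kernel's nice-to-weight table):

#include <stdint.h>
#include <stdio.h>

#define NICE_0_LOAD 1024ULL  /* unscaled nice-0 weight; the kernel may scale this further */

/*
 * Simplified calc_delta_fair(): heavier entities (weight > NICE_0_LOAD) accrue
 * vruntime more slowly, lighter ones faster.
 */
static uint64_t calc_delta_fair_sketch(uint64_t delta_ns, uint64_t weight)
{
    if (weight != NICE_0_LOAD)
        delta_ns = delta_ns * NICE_0_LOAD / weight;
    return delta_ns;
}

int main(void)
{
    printf("nice  0: %llu\n", (unsigned long long)calc_delta_fair_sketch(1000000, 1024));
    printf("nice -5: %llu\n", (unsigned long long)calc_delta_fair_sketch(1000000, 3121));
    printf("nice +5: %llu\n", (unsigned long long)calc_delta_fair_sketch(1000000, 335));
    return 0;
}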
735 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_slice() argument
738 struct sched_entity *init_se = se; in sched_slice()
745 slice = __sched_period(nr_running + !se->on_rq); in sched_slice()
747 for_each_sched_entity(se) { in sched_slice()
752 qcfs_rq = cfs_rq_of(se); in sched_slice()
755 if (unlikely(!se->on_rq)) { in sched_slice()
758 update_load_add(&lw, se->load.weight); in sched_slice()
761 slice = __calc_delta(slice, se->load.weight, load); in sched_slice()
781 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_vslice() argument
783 return calc_delta_fair(sched_slice(cfs_rq, se), se); in sched_vslice()
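sched_slice() gives each runnable entity a share of the scheduling period proportional to its weight at every level of the hierarchy, and sched_vslice() then turns that wall-clock slice into virtual time via calc_delta_fair(). A single-level sketch of the proportional split with illustrative numbers; the hierarchy walk and the period stretching for large nr_running are omitted:

#include <stdint.h>
#include <stdio.h>

/* Single-level share of the period: slice = period * se_weight / total queue weight. */
static uint64_t slice_sketch(uint64_t period_ns, uint64_t se_weight, uint64_t rq_weight)
{
    return period_ns * se_weight / rq_weight;
}

int main(void)
{
    /* Three nice-0 tasks sharing a 24 ms period (illustrative numbers). */
    uint64_t period = 24000000, w = 1024;

    printf("slice = %llu ns\n", (unsigned long long)slice_sketch(period, w, 3 * w));
    return 0;
}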
794 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
796 struct sched_avg *sa = &se->avg; in init_entity_runnable_average()
806 if (entity_is_task(se)) in init_entity_runnable_average()
807 sa->load_avg = scale_load_down(se->load.weight); in init_entity_runnable_average()
840 struct sched_entity *se = &p->se; in post_init_entity_util_avg() local
841 struct cfs_rq *cfs_rq = cfs_rq_of(se); in post_init_entity_util_avg()
842 struct sched_avg *sa = &se->avg; in post_init_entity_util_avg()
857 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
863 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; in post_init_entity_util_avg()
876 trace_android_rvh_post_init_entity_util_avg(se); in post_init_entity_util_avg()
880 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
936 update_curr(cfs_rq_of(&rq->curr->se)); in update_curr_fair()
940 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start_fair() argument
948 stats = __schedstats_from_se(se); in update_stats_wait_start_fair()
950 if (entity_is_task(se)) in update_stats_wait_start_fair()
951 p = task_of(se); in update_stats_wait_start_fair()
957 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end_fair() argument
965 stats = __schedstats_from_se(se); in update_stats_wait_end_fair()
976 if (entity_is_task(se)) in update_stats_wait_end_fair()
977 p = task_of(se); in update_stats_wait_end_fair()
983 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue_sleeper_fair() argument
991 stats = __schedstats_from_se(se); in update_stats_enqueue_sleeper_fair()
993 if (entity_is_task(se)) in update_stats_enqueue_sleeper_fair()
994 tsk = task_of(se); in update_stats_enqueue_sleeper_fair()
1003 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_enqueue_fair() argument
1012 if (se != cfs_rq->curr) in update_stats_enqueue_fair()
1013 update_stats_wait_start_fair(cfs_rq, se); in update_stats_enqueue_fair()
1016 update_stats_enqueue_sleeper_fair(cfs_rq, se); in update_stats_enqueue_fair()
1020 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_dequeue_fair() argument
1030 if (se != cfs_rq->curr) in update_stats_dequeue_fair()
1031 update_stats_wait_end_fair(cfs_rq, se); in update_stats_dequeue_fair()
1033 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) { in update_stats_dequeue_fair()
1034 struct task_struct *tsk = task_of(se); in update_stats_dequeue_fair()
1052 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
1057 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
2443 now = p->se.exec_start; in numa_get_avg_runtime()
2444 runtime = p->se.sum_exec_runtime; in numa_get_avg_runtime()
2454 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
2940 u64 runtime = p->se.sum_exec_runtime; in task_numa_work()
3075 if (unlikely(p->se.sum_exec_runtime != runtime)) { in task_numa_work()
3076 u64 diff = p->se.sum_exec_runtime - runtime; in task_numa_work()
3148 now = curr->se.sum_exec_runtime; in task_tick_numa()
3215 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
3217 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
3219 if (entity_is_task(se)) { in account_entity_enqueue()
3222 account_numa_enqueue(rq, task_of(se)); in account_entity_enqueue()
3223 list_add(&se->group_node, &rq->cfs_tasks); in account_entity_enqueue()
3227 if (se_is_idle(se)) in account_entity_enqueue()
3232 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
3234 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
3236 if (entity_is_task(se)) { in account_entity_dequeue()
3237 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
3238 list_del_init(&se->group_node); in account_entity_dequeue()
3242 if (se_is_idle(se)) in account_entity_dequeue()
3296 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_load_avg() argument
3298 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
3299 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
3303 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_load_avg() argument
3305 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
3306 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
3313 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_load_avg() argument
3315 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_load_avg() argument
3318 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
3321 if (se->on_rq) { in reweight_entity()
3323 if (cfs_rq->curr == se) in reweight_entity()
3325 update_load_sub(&cfs_rq->load, se->load.weight); in reweight_entity()
3327 dequeue_load_avg(cfs_rq, se); in reweight_entity()
3329 update_load_set(&se->load, weight); in reweight_entity()
3333 u32 divider = get_pelt_divider(&se->avg); in reweight_entity()
3335 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); in reweight_entity()
3339 enqueue_load_avg(cfs_rq, se); in reweight_entity()
3340 if (se->on_rq) in reweight_entity()
3341 update_load_add(&cfs_rq->load, se->load.weight); in reweight_entity()
3347 struct sched_entity *se = &p->se; in reweight_task() local
3348 struct cfs_rq *cfs_rq = cfs_rq_of(se); in reweight_task()
3349 struct load_weight *load = &se->load; in reweight_task()
3352 reweight_entity(cfs_rq, se, weight); in reweight_task()
3473 static void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
3475 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in update_cfs_group()
3487 if (likely(se->load.weight == shares)) in update_cfs_group()
3493 reweight_entity(cfs_rq_of(se), se, shares); in update_cfs_group()
3497 static inline void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
3630 void set_task_rq_fair(struct sched_entity *se, in set_task_rq_fair() argument
3646 if (!(se->avg.last_update_time && prev)) in set_task_rq_fair()
3652 __update_load_avg_blocked_se(p_last_update_time, se); in set_task_rq_fair()
3653 se->avg.last_update_time = n_last_update_time; in set_task_rq_fair()
3724 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_util() argument
3726 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg; in update_tg_cfs_util()
3741 se->avg.util_avg = gcfs_rq->avg.util_avg; in update_tg_cfs_util()
3742 new_sum = se->avg.util_avg * divider; in update_tg_cfs_util()
3743 delta_sum = (long)new_sum - (long)se->avg.util_sum; in update_tg_cfs_util()
3744 se->avg.util_sum = new_sum; in update_tg_cfs_util()
3756 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_runnable() argument
3758 long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg; in update_tg_cfs_runnable()
3772 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg; in update_tg_cfs_runnable()
3773 new_sum = se->avg.runnable_avg * divider; in update_tg_cfs_runnable()
3774 delta_sum = (long)new_sum - (long)se->avg.runnable_sum; in update_tg_cfs_runnable()
3775 se->avg.runnable_sum = new_sum; in update_tg_cfs_runnable()
3786 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_load() argument
3810 runnable_sum += se->avg.load_sum; in update_tg_cfs_load()
3823 runnable_sum = min(se->avg.load_sum, load_sum); in update_tg_cfs_load()
3832 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; in update_tg_cfs_load()
3835 load_sum = se_weight(se) * runnable_sum; in update_tg_cfs_load()
3838 delta_avg = load_avg - se->avg.load_avg; in update_tg_cfs_load()
3842 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; in update_tg_cfs_load()
3844 se->avg.load_sum = runnable_sum; in update_tg_cfs_load()
3845 se->avg.load_avg = load_avg; in update_tg_cfs_load()
3860 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
3864 if (entity_is_task(se)) in propagate_entity_load_avg()
3867 gcfs_rq = group_cfs_rq(se); in propagate_entity_load_avg()
3873 cfs_rq = cfs_rq_of(se); in propagate_entity_load_avg()
3877 update_tg_cfs_util(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3878 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3879 update_tg_cfs_load(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3882 trace_pelt_se_tp(se); in propagate_entity_load_avg()
3891 static inline bool skip_blocked_update(struct sched_entity *se) in skip_blocked_update() argument
3893 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in skip_blocked_update()
3899 if (se->avg.load_avg || se->avg.util_avg) in skip_blocked_update()
3921 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
3931 static inline void migrate_se_pelt_lag(struct sched_entity *se) in migrate_se_pelt_lag() argument
3938 if (load_avg_is_decayed(&se->avg)) in migrate_se_pelt_lag()
3941 cfs_rq = cfs_rq_of(se); in migrate_se_pelt_lag()
4007 __update_load_avg_blocked_se(now, se); in migrate_se_pelt_lag()
4010 static void migrate_se_pelt_lag(struct sched_entity *se) {} in migrate_se_pelt_lag() argument
4100 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in attach_entity_load_avg() argument
4115 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
4116 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
4124 se->avg.util_sum = se->avg.util_avg * divider; in attach_entity_load_avg()
4126 se->avg.runnable_sum = se->avg.runnable_avg * divider; in attach_entity_load_avg()
4128 se->avg.load_sum = se->avg.load_avg * divider; in attach_entity_load_avg()
4129 if (se_weight(se) < se->avg.load_sum) in attach_entity_load_avg()
4130 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se)); in attach_entity_load_avg()
4132 se->avg.load_sum = 1; in attach_entity_load_avg()
4134 trace_android_rvh_attach_entity_load_avg(cfs_rq, se); in attach_entity_load_avg()
4136 enqueue_load_avg(cfs_rq, se); in attach_entity_load_avg()
4137 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
4138 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
4139 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; in attach_entity_load_avg()
4140 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; in attach_entity_load_avg()
4142 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
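attach_entity_load_avg() first rebuilds the entity's PELT *_sum values from its *_avg values using the queue's current divider, then folds the entity's averages into the cfs_rq aggregates. A much-reduced sketch of that two-step pattern for the util signal only; the struct and field names are illustrative and 47742 stands in for the PELT divider (roughly LOAD_AVG_MAX):

#include <stdint.h>
#include <stdio.h>

struct avg { uint64_t util_avg, util_sum; };  /* illustrative, not the kernel's sched_avg */

static void attach_sketch(struct avg *rq, struct avg *se, uint64_t pelt_divider)
{
    se->util_sum = se->util_avg * pelt_divider;  /* rebuild the sum from the average */

    rq->util_avg += se->util_avg;                /* fold the entity into the queue */
    rq->util_sum += se->util_sum;
}

int main(void)
{
    struct avg rq = { 300, 300 * 47742ULL }, se = { 100, 0 };

    attach_sketch(&rq, &se, 47742);
    printf("rq util_avg=%llu util_sum=%llu\n",
           (unsigned long long)rq.util_avg, (unsigned long long)rq.util_sum);
    return 0;
}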
4157 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg() argument
4159 trace_android_rvh_detach_entity_load_avg(cfs_rq, se); in detach_entity_load_avg()
4161 dequeue_load_avg(cfs_rq, se); in detach_entity_load_avg()
4162 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
4163 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
4168 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); in detach_entity_load_avg()
4169 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum); in detach_entity_load_avg()
4174 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
4190 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_load_avg() argument
4199 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) in update_load_avg()
4200 __update_load_avg_se(now, cfs_rq, se); in update_load_avg()
4203 decayed |= propagate_entity_load_avg(se); in update_load_avg()
4205 trace_android_rvh_update_load_avg(now, cfs_rq, se); in update_load_avg()
4207 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { in update_load_avg()
4216 attach_entity_load_avg(cfs_rq, se); in update_load_avg()
4224 detach_entity_load_avg(cfs_rq, se); in update_load_avg()
4238 static void sync_entity_load_avg(struct sched_entity *se) in sync_entity_load_avg() argument
4240 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sync_entity_load_avg()
4244 __update_load_avg_blocked_se(last_update_time, se); in sync_entity_load_avg()
4251 static void remove_entity_load_avg(struct sched_entity *se) in remove_entity_load_avg() argument
4253 struct cfs_rq *cfs_rq = cfs_rq_of(se); in remove_entity_load_avg()
4262 sync_entity_load_avg(se); in remove_entity_load_avg()
4264 trace_android_rvh_remove_entity_load_avg(cfs_rq, se); in remove_entity_load_avg()
4268 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
4269 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
4270 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; in remove_entity_load_avg()
4288 return READ_ONCE(p->se.avg.util_avg); in task_util()
4293 struct util_est ue = READ_ONCE(p->se.avg.util_est); in _task_util_est()
4376 ue = p->se.avg.util_est; in util_est_update()
4436 WRITE_ONCE(p->se.avg.util_est, ue); in util_est_update()
4438 trace_sched_util_est_se_tp(&p->se); in util_est_update()
4615 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) in update_load_avg() argument
4620 static inline void remove_entity_load_avg(struct sched_entity *se) {} in remove_entity_load_avg() argument
4623 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in attach_entity_load_avg() argument
4625 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in detach_entity_load_avg() argument
4645 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) in check_spread() argument
4648 s64 d = se->vruntime - cfs_rq->min_vruntime; in check_spread()
4658 static inline bool entity_is_long_sleeper(struct sched_entity *se) in entity_is_long_sleeper() argument
4663 if (se->exec_start == 0) in entity_is_long_sleeper()
4666 cfs_rq = cfs_rq_of(se); in entity_is_long_sleeper()
4671 if (sleep_time <= se->exec_start) in entity_is_long_sleeper()
4674 sleep_time -= se->exec_start; in entity_is_long_sleeper()
4682 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) in place_entity() argument
4693 vruntime += sched_vslice(cfs_rq, se); in place_entity()
4699 if (se_is_idle(se)) in place_entity()
4714 trace_android_rvh_place_entity(cfs_rq, se, initial, &vruntime); in place_entity()
4734 if (entity_is_long_sleeper(se)) in place_entity()
4735 se->vruntime = vruntime; in place_entity()
4737 se->vruntime = max_vruntime(se->vruntime, vruntime); in place_entity()
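place_entity() computes a target vruntime around the queue's min_vruntime: freshly forked entities are pushed one vslice ahead so they do not preempt immediately, a normal waker keeps its own vruntime if that is already larger, and only entities detected as very long sleepers have their vruntime overwritten outright. A condensed sketch of that decision, with the sleeper-bonus subtraction and the idle-entity handling from the lines above left out:

#include <stdint.h>
#include <stdio.h>

/* Condensed place_entity() decision; all names and the call shape are illustrative. */
static uint64_t place_entity_sketch(uint64_t se_vruntime, uint64_t min_vruntime,
                                    uint64_t vslice, int initial, int long_sleeper)
{
    uint64_t vruntime = min_vruntime;

    if (initial)
        vruntime += vslice;          /* new entities start one vslice behind */

    if (long_sleeper)
        return vruntime;             /* cap the credit after a very long sleep */

    /* never move an entity backwards in virtual time */
    return se_vruntime > vruntime ? se_vruntime : vruntime;
}

int main(void)
{
    printf("fork:  %llu\n", (unsigned long long)place_entity_sketch(0, 1000, 50, 1, 0));
    printf("wake:  %llu\n", (unsigned long long)place_entity_sketch(1200, 1000, 50, 0, 0));
    printf("sleep: %llu\n", (unsigned long long)place_entity_sketch(10, 1000, 50, 0, 1));
    return 0;
}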
4775 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
4778 bool curr = cfs_rq->curr == se; in enqueue_entity()
4785 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
4796 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
4807 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); in enqueue_entity()
4808 se_update_runnable(se); in enqueue_entity()
4809 update_cfs_group(se); in enqueue_entity()
4810 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
4813 place_entity(cfs_rq, se, 0); in enqueue_entity()
4816 se->exec_start = 0; in enqueue_entity()
4819 update_stats_enqueue_fair(cfs_rq, se, flags); in enqueue_entity()
4820 check_spread(cfs_rq, se); in enqueue_entity()
4822 __enqueue_entity(cfs_rq, se); in enqueue_entity()
4823 se->on_rq = 1; in enqueue_entity()
4832 static void __clear_buddies_last(struct sched_entity *se) in __clear_buddies_last() argument
4834 for_each_sched_entity(se) { in __clear_buddies_last()
4835 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_last()
4836 if (cfs_rq->last != se) in __clear_buddies_last()
4843 static void __clear_buddies_next(struct sched_entity *se) in __clear_buddies_next() argument
4845 for_each_sched_entity(se) { in __clear_buddies_next()
4846 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next()
4847 if (cfs_rq->next != se) in __clear_buddies_next()
4854 static void __clear_buddies_skip(struct sched_entity *se) in __clear_buddies_skip() argument
4856 for_each_sched_entity(se) { in __clear_buddies_skip()
4857 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_skip()
4858 if (cfs_rq->skip != se) in __clear_buddies_skip()
4865 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
4867 if (cfs_rq->last == se) in clear_buddies()
4868 __clear_buddies_last(se); in clear_buddies()
4870 if (cfs_rq->next == se) in clear_buddies()
4871 __clear_buddies_next(se); in clear_buddies()
4873 if (cfs_rq->skip == se) in clear_buddies()
4874 __clear_buddies_skip(se); in clear_buddies()
4880 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
4884 if (entity_is_task(se) && task_on_rq_migrating(task_of(se))) in dequeue_entity()
4901 update_load_avg(cfs_rq, se, action); in dequeue_entity()
4902 se_update_runnable(se); in dequeue_entity()
4904 update_stats_dequeue_fair(cfs_rq, se, flags); in dequeue_entity()
4906 clear_buddies(cfs_rq, se); in dequeue_entity()
4908 if (se != cfs_rq->curr) in dequeue_entity()
4909 __dequeue_entity(cfs_rq, se); in dequeue_entity()
4910 se->on_rq = 0; in dequeue_entity()
4911 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
4920 se->vruntime -= cfs_rq->min_vruntime; in dequeue_entity()
4925 update_cfs_group(se); in dequeue_entity()
4947 struct sched_entity *se; in check_preempt_tick() local
4981 se = __pick_first_entity(cfs_rq); in check_preempt_tick()
4982 delta = curr->vruntime - se->vruntime; in check_preempt_tick()
4991 void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
4993 clear_buddies(cfs_rq, se); in set_next_entity()
4996 if (se->on_rq) { in set_next_entity()
5002 update_stats_wait_end_fair(cfs_rq, se); in set_next_entity()
5003 __dequeue_entity(cfs_rq, se); in set_next_entity()
5004 update_load_avg(cfs_rq, se, UPDATE_TG); in set_next_entity()
5007 update_stats_curr_start(cfs_rq, se); in set_next_entity()
5008 cfs_rq->curr = se; in set_next_entity()
5016 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { in set_next_entity()
5019 stats = __schedstats_from_se(se); in set_next_entity()
5022 se->sum_exec_runtime - se->prev_sum_exec_runtime)); in set_next_entity()
5025 se->prev_sum_exec_runtime = se->sum_exec_runtime; in set_next_entity()
5030 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
5043 struct sched_entity *se = NULL; in pick_next_entity() local
5045 trace_android_rvh_pick_next_entity(cfs_rq, curr, &se); in pick_next_entity()
5046 if (se) in pick_next_entity()
5056 se = left; /* ideally we run the leftmost entity */ in pick_next_entity()
5062 if (cfs_rq->skip && cfs_rq->skip == se) { in pick_next_entity()
5065 if (se == curr) { in pick_next_entity()
5068 second = __pick_next_entity(se); in pick_next_entity()
5074 se = second; in pick_next_entity()
5081 se = cfs_rq->next; in pick_next_entity()
5086 se = cfs_rq->last; in pick_next_entity()
5090 return se; in pick_next_entity()
5366 struct sched_entity *se; in throttle_cfs_rq() local
5390 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
5399 for_each_sched_entity(se) { in throttle_cfs_rq()
5400 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
5402 if (!se->on_rq) in throttle_cfs_rq()
5405 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); in throttle_cfs_rq()
5407 if (cfs_rq_is_idle(group_cfs_rq(se))) in throttle_cfs_rq()
5415 se = parent_entity(se); in throttle_cfs_rq()
5420 for_each_sched_entity(se) { in throttle_cfs_rq()
5421 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
5423 if (!se->on_rq) in throttle_cfs_rq()
5426 update_load_avg(qcfs_rq, se, 0); in throttle_cfs_rq()
5427 se_update_runnable(se); in throttle_cfs_rq()
5429 if (cfs_rq_is_idle(group_cfs_rq(se))) in throttle_cfs_rq()
5453 struct sched_entity *se; in unthrottle_cfs_rq() local
5456 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
5477 for_each_sched_entity(se) { in unthrottle_cfs_rq()
5478 if (list_add_leaf_cfs_rq(cfs_rq_of(se))) in unthrottle_cfs_rq()
5486 for_each_sched_entity(se) { in unthrottle_cfs_rq()
5487 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
5489 if (se->on_rq) in unthrottle_cfs_rq()
5491 enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP); in unthrottle_cfs_rq()
5493 if (cfs_rq_is_idle(group_cfs_rq(se))) in unthrottle_cfs_rq()
5504 for_each_sched_entity(se) { in unthrottle_cfs_rq()
5505 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
5507 update_load_avg(qcfs_rq, se, UPDATE_TG); in unthrottle_cfs_rq()
5508 se_update_runnable(se); in unthrottle_cfs_rq()
5510 if (cfs_rq_is_idle(group_cfs_rq(se))) in unthrottle_cfs_rq()
6028 struct sched_entity *se = &p->se; in hrtick_start_fair() local
6029 struct cfs_rq *cfs_rq = cfs_rq_of(se); in hrtick_start_fair()
6034 u64 slice = sched_slice(cfs_rq, se); in hrtick_start_fair()
6035 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; in hrtick_start_fair()
6059 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) in hrtick_update()
6133 struct sched_entity *se = &p->se; in enqueue_task_fair() local
6156 for_each_sched_entity(se) { in enqueue_task_fair()
6157 if (se->on_rq) in enqueue_task_fair()
6159 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
6160 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
6176 for_each_sched_entity(se) { in enqueue_task_fair()
6177 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
6179 update_load_avg(cfs_rq, se, UPDATE_TG); in enqueue_task_fair()
6180 se_update_runnable(se); in enqueue_task_fair()
6181 update_cfs_group(se); in enqueue_task_fair()
6220 static void set_next_buddy(struct sched_entity *se);
6230 struct sched_entity *se = &p->se; in dequeue_task_fair() local
6237 for_each_sched_entity(se) { in dequeue_task_fair()
6238 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
6239 dequeue_entity(cfs_rq, se, flags); in dequeue_task_fair()
6254 se = parent_entity(se); in dequeue_task_fair()
6259 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) in dequeue_task_fair()
6260 set_next_buddy(se); in dequeue_task_fair()
6267 for_each_sched_entity(se) { in dequeue_task_fair()
6268 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
6270 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_task_fair()
6271 se_update_runnable(se); in dequeue_task_fair()
6272 update_cfs_group(se); in dequeue_task_fair()
6341 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
6364 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
6371 lsub_positive(&runnable, p->se.avg.runnable_avg); in cpu_runnable_without()
6606 sync_entity_load_avg(&p->se); in find_idlest_cpu()
6957 sync_entity_load_avg(&p->se); in select_idle_sibling()
7127 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
7316 sync_entity_load_avg(&p->se); in find_energy_efficient_cpu()
7522 sync_entity_load_avg(&p->se); in select_task_rq_fair()
7590 struct sched_entity *se = &p->se; in migrate_task_rq_fair() local
7599 struct cfs_rq *cfs_rq = cfs_rq_of(se); in migrate_task_rq_fair()
7601 se->vruntime -= u64_u32_load(cfs_rq->min_vruntime); in migrate_task_rq_fair()
7605 remove_entity_load_avg(se); in migrate_task_rq_fair()
7617 migrate_se_pelt_lag(se); in migrate_task_rq_fair()
7621 se->avg.last_update_time = 0; in migrate_task_rq_fair()
7628 remove_entity_load_avg(&p->se); in task_dead_fair()
7641 static unsigned long wakeup_gran(struct sched_entity *se) in wakeup_gran() argument
7658 return calc_delta_fair(gran, se); in wakeup_gran()
7676 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) in wakeup_preempt_entity() argument
7678 s64 gran, vdiff = curr->vruntime - se->vruntime; in wakeup_preempt_entity()
7683 gran = wakeup_gran(se); in wakeup_preempt_entity()
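wakeup_preempt_entity() signals preemption (returns 1) only when the waking entity's vruntime is behind curr's by more than the wakeup granularity; a smaller lead returns 0 and no lead at all returns -1, which keeps minor wakeups from thrashing the running task. A standalone sketch of that three-way decision:

#include <stdint.h>
#include <stdio.h>

static int should_preempt(int64_t curr_vruntime, int64_t se_vruntime, int64_t gran)
{
    int64_t vdiff = curr_vruntime - se_vruntime;

    if (vdiff <= 0)
        return -1;       /* waker is not ahead in fairness terms */
    if (vdiff > gran)
        return 1;        /* waker is ahead by more than the granularity: preempt */
    return 0;
}

int main(void)
{
    printf("%d %d %d\n",
           should_preempt(100, 200, 50),   /* waker has larger vruntime: -1 */
           should_preempt(100,  80, 50),   /* ahead, but within granularity: 0 */
           should_preempt(100,  20, 50));  /* ahead by more than granularity: 1 */
    return 0;
}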
7690 static void set_last_buddy(struct sched_entity *se) in set_last_buddy() argument
7692 for_each_sched_entity(se) { in set_last_buddy()
7693 if (SCHED_WARN_ON(!se->on_rq)) in set_last_buddy()
7695 if (se_is_idle(se)) in set_last_buddy()
7697 cfs_rq_of(se)->last = se; in set_last_buddy()
7701 static void set_next_buddy(struct sched_entity *se) in set_next_buddy() argument
7703 for_each_sched_entity(se) { in set_next_buddy()
7704 if (SCHED_WARN_ON(!se->on_rq)) in set_next_buddy()
7706 if (se_is_idle(se)) in set_next_buddy()
7708 cfs_rq_of(se)->next = se; in set_next_buddy()
7712 static void set_skip_buddy(struct sched_entity *se) in set_skip_buddy() argument
7714 for_each_sched_entity(se) in set_skip_buddy()
7715 cfs_rq_of(se)->skip = se; in set_skip_buddy()
7724 struct sched_entity *se = &curr->se, *pse = &p->se; in check_preempt_wakeup() local
7732 if (unlikely(se == pse)) in check_preempt_wakeup()
7777 find_matching_se(&se, &pse); in check_preempt_wakeup()
7780 cse_is_idle = se_is_idle(se); in check_preempt_wakeup()
7792 update_curr(cfs_rq_of(se)); in check_preempt_wakeup()
7794 wake_flags, se, pse, next_buddy_marked, sysctl_sched_wakeup_granularity); in check_preempt_wakeup()
7800 if (wakeup_preempt_entity(se, pse) == 1) { in check_preempt_wakeup()
7823 if (unlikely(!se->on_rq || curr == rq->idle)) in check_preempt_wakeup()
7826 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) in check_preempt_wakeup()
7827 set_last_buddy(se); in check_preempt_wakeup()
7833 struct sched_entity *se; in pick_task_fair() local
7855 se = pick_next_entity(cfs_rq, curr); in pick_task_fair()
7856 cfs_rq = group_cfs_rq(se); in pick_task_fair()
7859 return task_of(se); in pick_task_fair()
7867 struct sched_entity *se = NULL; in pick_next_task_fair() local
7919 se = pick_next_entity(cfs_rq, curr); in pick_next_task_fair()
7920 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
7923 p = task_of(se); in pick_next_task_fair()
7924 trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, false, prev); in pick_next_task_fair()
7931 struct sched_entity *pse = &prev->se; in pick_next_task_fair()
7933 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
7934 int se_depth = se->depth; in pick_next_task_fair()
7942 set_next_entity(cfs_rq_of(se), se); in pick_next_task_fair()
7943 se = parent_entity(se); in pick_next_task_fair()
7948 set_next_entity(cfs_rq, se); in pick_next_task_fair()
7957 trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, true, prev); in pick_next_task_fair()
7962 se = pick_next_entity(cfs_rq, NULL); in pick_next_task_fair()
7963 set_next_entity(cfs_rq, se); in pick_next_task_fair()
7964 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
7967 p = task_of(se); in pick_next_task_fair()
7976 list_move(&p->se.group_node, &rq->cfs_tasks); in pick_next_task_fair()
8022 struct sched_entity *se = &prev->se; in put_prev_task_fair() local
8025 for_each_sched_entity(se) { in put_prev_task_fair()
8026 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
8027 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
8040 struct sched_entity *se = &curr->se; in yield_task_fair() local
8048 clear_buddies(cfs_rq, se); in yield_task_fair()
8064 set_skip_buddy(se); in yield_task_fair()
8069 struct sched_entity *se = &p->se; in yield_to_task_fair() local
8072 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) in yield_to_task_fair()
8076 set_next_buddy(se); in yield_to_task_fair()
8309 (&p->se == cfs_rq_of(&p->se)->next || in task_hot()
8310 &p->se == cfs_rq_of(&p->se)->last)) in task_hot()
8326 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
8524 &env->src_rq->cfs_tasks, se.group_node) { in detach_one_task()
8593 p = list_last_entry(tasks, struct task_struct, se.group_node); in detach_tasks()
8648 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
8671 list_move(&p->se.group_node, tasks); in detach_tasks()
8724 p = list_first_entry(tasks, struct task_struct, se.group_node); in attach_tasks()
8725 list_del_init(&p->se.group_node); in attach_tasks()
8822 struct sched_entity *se; in __update_blocked_fair() local
8835 se = cfs_rq->tg->se[cpu]; in __update_blocked_fair()
8836 if (se && !skip_blocked_update(se)) in __update_blocked_fair()
8837 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); in __update_blocked_fair()
8862 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load() local
8870 for_each_sched_entity(se) { in update_cfs_rq_h_load()
8871 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
8872 WRITE_ONCE(cfs_rq->h_load_next, se); in update_cfs_rq_h_load()
8877 if (!se) { in update_cfs_rq_h_load()
8882 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { in update_cfs_rq_h_load()
8884 load = div64_ul(load * se->avg.load_avg, in update_cfs_rq_h_load()
8886 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
8897 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
8915 return p->se.avg.load_avg; in task_h_load()
9595 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
11867 __entity_slice_used(struct sched_entity *se, int min_nr_tasks) in __entity_slice_used() argument
11869 u64 slice = sched_slice(cfs_rq_of(se), se); in __entity_slice_used()
11870 u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime; in __entity_slice_used()
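__entity_slice_used() compares the runtime accumulated since the entity was last set running (sum_exec_runtime minus prev_sum_exec_runtime) against its sched_slice(); task_tick_core() uses it to decide whether a force-idled sibling warrants a resched. A trivial sketch of the comparison; the kernel additionally scales rtime by a minimum task count (MIN_NR_TASKS_DURING_FORCEIDLE), which is dropped here:

#include <stdint.h>
#include <stdio.h>

/* Has the entity consumed its allotted slice since it started running? */
static int entity_slice_used_sketch(uint64_t sum_exec, uint64_t prev_sum_exec,
                                    uint64_t slice)
{
    return (sum_exec - prev_sum_exec) > slice;
}

int main(void)
{
    printf("%d\n", entity_slice_used_sketch(7000000, 1000000, 4000000)); /* 1: over budget */
    printf("%d\n", entity_slice_used_sketch(2000000, 1000000, 4000000)); /* 0: still within */
    return 0;
}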
11896 __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE)) in task_tick_core()
11903 static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle) in se_fi_update() argument
11905 for_each_sched_entity(se) { in se_fi_update()
11906 struct cfs_rq *cfs_rq = cfs_rq_of(se); in se_fi_update()
11920 struct sched_entity *se = &p->se; in task_vruntime_update() local
11925 se_fi_update(se, rq->core->core_forceidle_seq, in_fi); in task_vruntime_update()
11931 struct sched_entity *sea = &a->se; in cfs_prio_less()
11932 struct sched_entity *seb = &b->se; in cfs_prio_less()
11989 struct sched_entity *se = &curr->se; in task_tick_fair() local
11991 for_each_sched_entity(se) { in task_tick_fair()
11992 cfs_rq = cfs_rq_of(se); in task_tick_fair()
11993 entity_tick(cfs_rq, se, queued); in task_tick_fair()
12013 struct sched_entity *se = &p->se, *curr; in task_fork_fair() local
12024 se->vruntime = curr->vruntime; in task_fork_fair()
12026 place_entity(cfs_rq, se, 1); in task_fork_fair()
12028 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { in task_fork_fair()
12033 swap(curr->vruntime, se->vruntime); in task_fork_fair()
12037 se->vruntime -= cfs_rq->min_vruntime; in task_fork_fair()
12068 struct sched_entity *se = &p->se; in vruntime_normalized() local
12087 if (!se->sum_exec_runtime || in vruntime_normalized()
12099 static void propagate_entity_cfs_rq(struct sched_entity *se) in propagate_entity_cfs_rq() argument
12101 struct cfs_rq *cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
12110 se = se->parent; in propagate_entity_cfs_rq()
12112 for_each_sched_entity(se) { in propagate_entity_cfs_rq()
12113 cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
12115 update_load_avg(cfs_rq, se, UPDATE_TG); in propagate_entity_cfs_rq()
12125 static void propagate_entity_cfs_rq(struct sched_entity *se) { } in propagate_entity_cfs_rq() argument
12128 static void detach_entity_cfs_rq(struct sched_entity *se) in detach_entity_cfs_rq() argument
12130 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_entity_cfs_rq()
12139 if (!se->avg.last_update_time) in detach_entity_cfs_rq()
12144 update_load_avg(cfs_rq, se, 0); in detach_entity_cfs_rq()
12145 detach_entity_load_avg(cfs_rq, se); in detach_entity_cfs_rq()
12147 propagate_entity_cfs_rq(se); in detach_entity_cfs_rq()
12150 static void attach_entity_cfs_rq(struct sched_entity *se) in attach_entity_cfs_rq() argument
12152 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_entity_cfs_rq()
12155 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); in attach_entity_cfs_rq()
12156 attach_entity_load_avg(cfs_rq, se); in attach_entity_cfs_rq()
12158 propagate_entity_cfs_rq(se); in attach_entity_cfs_rq()
12163 struct sched_entity *se = &p->se; in detach_task_cfs_rq() local
12164 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_task_cfs_rq()
12171 place_entity(cfs_rq, se, 0); in detach_task_cfs_rq()
12172 se->vruntime -= cfs_rq->min_vruntime; in detach_task_cfs_rq()
12175 detach_entity_cfs_rq(se); in detach_task_cfs_rq()
12180 struct sched_entity *se = &p->se; in attach_task_cfs_rq() local
12181 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_task_cfs_rq()
12183 attach_entity_cfs_rq(se); in attach_task_cfs_rq()
12186 se->vruntime += cfs_rq->min_vruntime; in attach_task_cfs_rq()
12218 struct sched_entity *se = &p->se; in set_next_task_fair() local
12226 list_move(&se->group_node, &rq->cfs_tasks); in set_next_task_fair()
12230 for_each_sched_entity(se) { in set_next_task_fair()
12231 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_next_task_fair()
12233 set_next_entity(cfs_rq, se); in set_next_task_fair()
12262 p->se.avg.last_update_time = 0; in task_change_group_fair()
12275 if (tg->se) in free_fair_sched_group()
12276 kfree(tg->se[i]); in free_fair_sched_group()
12280 kfree(tg->se); in free_fair_sched_group()
12285 struct sched_entity *se; in alloc_fair_sched_group() local
12292 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL); in alloc_fair_sched_group()
12293 if (!tg->se) in alloc_fair_sched_group()
12306 se = kzalloc_node(sizeof(struct sched_entity_stats), in alloc_fair_sched_group()
12308 if (!se) in alloc_fair_sched_group()
12312 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
12313 init_entity_runnable_average(se); in alloc_fair_sched_group()
12326 struct sched_entity *se; in online_fair_sched_group() local
12333 se = tg->se[i]; in online_fair_sched_group()
12336 attach_entity_cfs_rq(se); in online_fair_sched_group()
12351 if (tg->se[cpu]) in unregister_fair_sched_group()
12352 remove_entity_load_avg(tg->se[cpu]); in unregister_fair_sched_group()
12370 struct sched_entity *se, int cpu, in init_tg_cfs_entry() argument
12380 tg->se[cpu] = se; in init_tg_cfs_entry()
12383 if (!se) in init_tg_cfs_entry()
12387 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
12388 se->depth = 0; in init_tg_cfs_entry()
12390 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
12391 se->depth = parent->depth + 1; in init_tg_cfs_entry()
12394 se->my_q = cfs_rq; in init_tg_cfs_entry()
12396 update_load_set(&se->load, NICE_0_LOAD); in init_tg_cfs_entry()
12397 se->parent = parent; in init_tg_cfs_entry()
12411 if (!tg->se[0]) in __sched_group_set_shares()
12422 struct sched_entity *se = tg->se[i]; in __sched_group_set_shares() local
12428 for_each_sched_entity(se) { in __sched_group_set_shares()
12429 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); in __sched_group_set_shares()
12430 update_cfs_group(se); in __sched_group_set_shares()
12473 struct sched_entity *se = tg->se[i]; in sched_group_set_idle() local
12485 if (se->on_rq) { in sched_group_set_idle()
12486 parent_cfs_rq = cfs_rq_of(se); in sched_group_set_idle()
12498 for_each_sched_entity(se) { in sched_group_set_idle()
12499 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sched_group_set_idle()
12501 if (!se->on_rq) in sched_group_set_idle()
12543 struct sched_entity *se = &task->se; in get_rr_interval_fair() local
12551 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se)); in get_rr_interval_fair()