
Lines Matching refs:sgs

All hits below are uses of the struct sg_lb_stats pointer ("scheduling-group load-balancing statistics") in the CFS load balancer, kernel/sched/fair.c; the leading number is the line in that file, and the trailing "argument"/"local" tag says how sgs is bound in the enclosing function.

8992 group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)  in group_has_capacity()  argument
8994 if (sgs->sum_nr_running < sgs->group_weight) in group_has_capacity()
8997 if ((sgs->group_capacity * imbalance_pct) < in group_has_capacity()
8998 (sgs->group_runnable * 100)) in group_has_capacity()
9001 if ((sgs->group_capacity * 100) > in group_has_capacity()
9002 (sgs->group_util * imbalance_pct)) in group_has_capacity()
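
The three tests above decide whether a group still has spare capacity. A minimal userspace sketch of the same checks, with a simplified stand-in for struct sg_lb_stats (field names taken from the listing; field semantics and the sample imbalance_pct of 117 are assumptions, not taken from this page):

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for the kernel's struct sg_lb_stats. */
    struct sg_lb_stats {
        unsigned long group_capacity;  /* summed CPU capacity of the group */
        unsigned long group_util;      /* summed utilization */
        unsigned long group_runnable;  /* summed runnable pressure */
        unsigned int  sum_nr_running;  /* tasks queued in the group */
        unsigned int  group_weight;    /* number of CPUs in the group */
    };

    /* Mirrors lines 8994-9002 above. */
    static bool group_has_capacity(unsigned int imbalance_pct,
                                   const struct sg_lb_stats *sgs)
    {
        /* Fewer tasks than CPUs: trivially has room. */
        if (sgs->sum_nr_running < sgs->group_weight)
            return true;

        /* Runnable pressure already exceeds capacity by more than
         * the imbalance_pct margin: no room. */
        if ((sgs->group_capacity * imbalance_pct) <
            (sgs->group_runnable * 100))
            return false;

        /* Utilization still below capacity by the same margin: room. */
        if ((sgs->group_capacity * 100) >
            (sgs->group_util * imbalance_pct))
            return true;

        return false;
    }

    int main(void)
    {
        struct sg_lb_stats sgs = {
            .group_capacity = 1024, .group_util = 600,
            .group_runnable = 700,  .sum_nr_running = 4, .group_weight = 4,
        };
        printf("has capacity: %d\n", group_has_capacity(117, &sgs));
        return 0;
    }
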
9017 group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs) in group_is_overloaded() argument
9019 if (sgs->sum_nr_running <= sgs->group_weight) in group_is_overloaded()
9022 if ((sgs->group_capacity * 100) < in group_is_overloaded()
9023 (sgs->group_util * imbalance_pct)) in group_is_overloaded()
9026 if ((sgs->group_capacity * imbalance_pct) < in group_is_overloaded()
9027 (sgs->group_runnable * 100)) in group_is_overloaded()
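
group_is_overloaded() is the near-mirror of group_has_capacity(), with deliberately asymmetric thresholds: "<= group_weight" at 9019 versus "< group_weight" at 8994 means a group running exactly one task per CPU can be simultaneously "not overloaded" and "without capacity", the state group_classify() maps to fully busy. A sketch under the same simplified struct as above:

    #include <stdbool.h>
    #include <stdio.h>

    struct sg_lb_stats {
        unsigned long group_capacity, group_util, group_runnable;
        unsigned int  sum_nr_running, group_weight;
    };

    /* Mirrors lines 9019-9027 above. */
    static bool group_is_overloaded(unsigned int imbalance_pct,
                                    const struct sg_lb_stats *sgs)
    {
        /* No more tasks than CPUs: cannot be overloaded. */
        if (sgs->sum_nr_running <= sgs->group_weight)
            return false;

        /* Utilization exceeds capacity by more than the margin. */
        if ((sgs->group_capacity * 100) <
            (sgs->group_util * imbalance_pct))
            return true;

        /* Runnable pressure exceeds capacity by more than the margin. */
        if ((sgs->group_capacity * imbalance_pct) <
            (sgs->group_runnable * 100))
            return true;

        return false;
    }

    int main(void)
    {
        struct sg_lb_stats sgs = {
            .group_capacity = 1024, .group_util = 1000,
            .group_runnable = 1400, .sum_nr_running = 6, .group_weight = 4,
        };
        printf("overloaded: %d\n", group_is_overloaded(117, &sgs));
        return 0;
    }
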
9036 struct sg_lb_stats *sgs) in group_classify() argument
9038 if (group_is_overloaded(imbalance_pct, sgs)) in group_classify()
9044 if (sgs->group_asym_packing) in group_classify()
9047 if (sgs->group_misfit_task_load) in group_classify()
9050 if (!group_has_capacity(imbalance_pct, sgs)) in group_classify()
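
group_classify() turns the two predicates above into an ordered severity ladder (9038-9050). A sketch with the predicates precomputed as booleans; the enum ordering is assumed from the kernel's enum group_type, and the sg_imbalanced(group) test the kernel makes between the first two rungs is omitted here because it never touches sgs:

    #include <stdbool.h>
    #include <stdio.h>

    /* Ordered from least to most loaded; higher values take
     * precedence when picking a busiest group. */
    enum group_type {
        group_has_spare,
        group_fully_busy,
        group_misfit_task,
        group_asym_packing,
        group_imbalanced,
        group_overloaded,
    };

    struct sg_lb_stats {
        bool          overloaded, has_capacity;  /* precomputed predicates */
        unsigned int  group_asym_packing;
        unsigned long group_misfit_task_load;
    };

    /* The decision ladder from lines 9038-9050. */
    static enum group_type group_classify(const struct sg_lb_stats *sgs)
    {
        if (sgs->overloaded)
            return group_overloaded;
        if (sgs->group_asym_packing)
            return group_asym_packing;
        if (sgs->group_misfit_task_load)
            return group_misfit_task;
        if (!sgs->has_capacity)
            return group_fully_busy;
        return group_has_spare;
    }

    int main(void)
    {
        /* Neither overloaded nor with capacity: reachable because of
         * the asymmetric thresholds noted above, and classified as
         * fully busy. */
        struct sg_lb_stats sgs = { .overloaded = false,
                                   .has_capacity = false };
        printf("fully busy: %d\n",
               group_classify(&sgs) == group_fully_busy);
        return 0;
    }
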
9065 struct sg_lb_stats *sgs, in update_sg_lb_stats() argument
9070 memset(sgs, 0, sizeof(*sgs)); in update_sg_lb_stats()
9077 sgs->group_load += cpu_load(rq); in update_sg_lb_stats()
9078 sgs->group_util += cpu_util(i); in update_sg_lb_stats()
9079 sgs->group_runnable += cpu_runnable(rq); in update_sg_lb_stats()
9080 sgs->sum_h_nr_running += rq->cfs.h_nr_running; in update_sg_lb_stats()
9083 sgs->sum_nr_running += nr_running; in update_sg_lb_stats()
9092 sgs->nr_numa_running += rq->nr_numa_running; in update_sg_lb_stats()
9093 sgs->nr_preferred_running += rq->nr_preferred_running; in update_sg_lb_stats()
9099 sgs->idle_cpus++; in update_sg_lb_stats()
9109 sgs->group_misfit_task_load < rq->misfit_task_load) { in update_sg_lb_stats()
9110 sgs->group_misfit_task_load = rq->misfit_task_load; in update_sg_lb_stats()
9118 sgs->sum_h_nr_running && in update_sg_lb_stats()
9120 sgs->group_asym_packing = 1; in update_sg_lb_stats()
9123 sgs->group_capacity = group->sgc->capacity; in update_sg_lb_stats()
9125 sgs->group_weight = group->group_weight; in update_sg_lb_stats()
9127 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs); in update_sg_lb_stats()
9130 if (sgs->group_type == group_overloaded) in update_sg_lb_stats()
9131 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / in update_sg_lb_stats()
9132 sgs->group_capacity; in update_sg_lb_stats()
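
update_sg_lb_stats() builds one sg_lb_stats by summing per-CPU PELT signals, then computes avg_load only when the group is overloaded (9130-9132), since dividing load by capacity is only the right metric once every CPU is busy. A condensed, runnable sketch; runqueues become a hypothetical flat array, and the NUMA counters (9092-9093), misfit tracking (9109-9110), asym packing (9118-9120), and the real group_classify() call (9127) are elided:

    #include <stdio.h>
    #include <string.h>

    #define SCHED_CAPACITY_SCALE 1024  /* fixed-point scale: one full CPU */

    /* Hypothetical stand-in for what cpu_load()/cpu_util()/
     * cpu_runnable() read off each runqueue. */
    struct cpu_sample {
        unsigned long load, util, runnable;
        unsigned int  nr_running;
    };

    struct sg_lb_stats {
        unsigned long group_load, group_util, group_runnable;
        unsigned long group_capacity, avg_load;
        unsigned int  sum_nr_running, idle_cpus, group_weight;
        int           overloaded;  /* stands in for group_type */
    };

    static void update_sg_lb_stats(struct sg_lb_stats *sgs,
                                   const struct cpu_sample *cpu, int ncpus,
                                   unsigned long capacity)
    {
        memset(sgs, 0, sizeof(*sgs));                     /* 9070 */

        for (int i = 0; i < ncpus; i++) {
            sgs->group_load     += cpu[i].load;           /* 9077 */
            sgs->group_util     += cpu[i].util;           /* 9078 */
            sgs->group_runnable += cpu[i].runnable;       /* 9079 */
            sgs->sum_nr_running += cpu[i].nr_running;     /* 9083 */
            if (!cpu[i].nr_running)
                sgs->idle_cpus++;                         /* 9099 */
        }

        sgs->group_capacity = capacity;                   /* 9123 */
        sgs->group_weight = ncpus;                        /* 9125 */
        /* Crude overload stand-in; the kernel calls group_classify(). */
        sgs->overloaded = sgs->sum_nr_running > sgs->group_weight;

        /* avg_load normalizes load by capacity, and is only computed
         * for overloaded groups (9130-9132). */
        if (sgs->overloaded)
            sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
                            sgs->group_capacity;
    }

    int main(void)
    {
        struct cpu_sample cpus[] = {
            { 900, 800, 950, 3 }, { 700, 650, 720, 2 },
        };
        struct sg_lb_stats sgs;

        update_sg_lb_stats(&sgs, cpus, 2, 2048);
        printf("avg_load = %lu\n", sgs.avg_load);
        return 0;
    }
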
9151 struct sg_lb_stats *sgs) in update_sd_pick_busiest() argument
9156 if (!sgs->sum_h_nr_running) in update_sd_pick_busiest()
9165 if (sgs->group_type == group_misfit_task && in update_sd_pick_busiest()
9170 if (sgs->group_type > busiest->group_type) in update_sd_pick_busiest()
9173 if (sgs->group_type < busiest->group_type) in update_sd_pick_busiest()
9181 switch (sgs->group_type) { in update_sd_pick_busiest()
9184 if (sgs->avg_load <= busiest->avg_load) in update_sd_pick_busiest()
9206 if (sgs->group_misfit_task_load < busiest->group_misfit_task_load) in update_sd_pick_busiest()
9221 if (sgs->avg_load <= busiest->avg_load) in update_sd_pick_busiest()
9233 if (sgs->idle_cpus > busiest->idle_cpus) in update_sd_pick_busiest()
9235 else if ((sgs->idle_cpus == busiest->idle_cpus) && in update_sd_pick_busiest()
9236 (sgs->sum_nr_running <= busiest->sum_nr_running)) in update_sd_pick_busiest()
9249 (sgs->group_type <= group_fully_busy) && in update_sd_pick_busiest()
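
update_sd_pick_busiest() decides whether the freshly scanned group displaces the current busiest candidate: empty groups never win (9156), a higher group_type always wins (9170-9173), and equal types fall through to a per-type tie-break (9181-9236). A condensed sketch; the capacity-asymmetry special cases at 9165 and 9249 are elided:

    #include <stdbool.h>
    #include <stdio.h>

    enum group_type {  /* least to most loaded */
        group_has_spare, group_fully_busy, group_misfit_task,
        group_asym_packing, group_imbalanced, group_overloaded,
    };

    struct sg_lb_stats {
        enum group_type group_type;
        unsigned long   avg_load, group_misfit_task_load;
        unsigned int    idle_cpus, sum_nr_running, sum_h_nr_running;
    };

    static bool update_sd_pick_busiest(const struct sg_lb_stats *sgs,
                                       const struct sg_lb_stats *busiest)
    {
        if (!sgs->sum_h_nr_running)      /* empty group: never busiest */
            return false;

        if (sgs->group_type > busiest->group_type)
            return true;
        if (sgs->group_type < busiest->group_type)
            return false;

        switch (sgs->group_type) {
        case group_overloaded:
        case group_fully_busy:
            /* More load per unit of capacity wins (9184, 9221). */
            return sgs->avg_load > busiest->avg_load;
        case group_misfit_task:
            /* The bigger stranded task wins (9206). */
            return sgs->group_misfit_task_load >=
                   busiest->group_misfit_task_load;
        case group_has_spare:
            /* Fewer idle CPUs wins; equal idle CPUs fall back to
             * more queued tasks (9233-9236). */
            if (sgs->idle_cpus > busiest->idle_cpus)
                return false;
            return !(sgs->idle_cpus == busiest->idle_cpus &&
                     sgs->sum_nr_running <= busiest->sum_nr_running);
        default:
            return true;
        }
    }

    int main(void)
    {
        struct sg_lb_stats a = { .group_type = group_has_spare,
                                 .sum_h_nr_running = 3,
                                 .idle_cpus = 1, .sum_nr_running = 3 };
        struct sg_lb_stats b = { .group_type = group_has_spare,
                                 .sum_h_nr_running = 2,
                                 .idle_cpus = 2, .sum_nr_running = 2 };
        printf("a busier than b: %d\n", update_sd_pick_busiest(&a, &b));
        return 0;
    }
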
9257 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) in fbq_classify_group() argument
9259 if (sgs->sum_h_nr_running > sgs->nr_numa_running) in fbq_classify_group()
9261 if (sgs->sum_h_nr_running > sgs->nr_preferred_running) in fbq_classify_group()
9275 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) in fbq_classify_group() argument
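
fbq_classify_group() gives NUMA balancing its view of a group (9259-9261): "regular" if some tasks have no preferred node at all, "remote" if some run away from their preferred node, "all" otherwise; without CONFIG_NUMA_BALANCING the stub at 9275 always answers "all". A sketch; the counter semantics are assumptions based on the kernel's rq counters of the same names:

    #include <stdio.h>

    enum fbq_type { regular, remote, all };

    struct sg_lb_stats {
        unsigned int sum_h_nr_running;     /* every CFS task in the group */
        unsigned int nr_numa_running;      /* tasks with a preferred node */
        unsigned int nr_preferred_running; /* tasks on their preferred node */
    };

    static enum fbq_type fbq_classify_group(const struct sg_lb_stats *sgs)
    {
        if (sgs->sum_h_nr_running > sgs->nr_numa_running)
            return regular;  /* some tasks have no NUMA placement */
        if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
            return remote;   /* some tasks are off their preferred node */
        return all;          /* everything is where NUMA wants it */
    }

    int main(void)
    {
        struct sg_lb_stats sgs = { 4, 4, 2 };
        printf("remote: %d\n", fbq_classify_group(&sgs) == remote);
        return 0;
    }
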
9342 struct sg_lb_stats *sgs, in update_sg_wakeup_stats() argument
9347 memset(sgs, 0, sizeof(*sgs)); in update_sg_wakeup_stats()
9351 sgs->group_misfit_task_load = 1; in update_sg_wakeup_stats()
9357 sgs->group_load += cpu_load_without(rq, p); in update_sg_wakeup_stats()
9358 sgs->group_util += cpu_util_without(i, p); in update_sg_wakeup_stats()
9359 sgs->group_runnable += cpu_runnable_without(rq, p); in update_sg_wakeup_stats()
9361 sgs->sum_h_nr_running += rq->cfs.h_nr_running - local; in update_sg_wakeup_stats()
9364 sgs->sum_nr_running += nr_running; in update_sg_wakeup_stats()
9370 sgs->idle_cpus++; in update_sg_wakeup_stats()
9374 sgs->group_misfit_task_load && in update_sg_wakeup_stats()
9376 sgs->group_misfit_task_load = 0; in update_sg_wakeup_stats()
9380 sgs->group_capacity = group->sgc->capacity; in update_sg_wakeup_stats()
9382 sgs->group_weight = group->group_weight; in update_sg_wakeup_stats()
9384 sgs->group_type = group_classify(sd->imbalance_pct, group, sgs); in update_sg_wakeup_stats()
9390 if (sgs->group_type == group_fully_busy || in update_sg_wakeup_stats()
9391 sgs->group_type == group_overloaded) in update_sg_wakeup_stats()
9392 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / in update_sg_wakeup_stats()
9393 sgs->group_capacity; in update_sg_wakeup_stats()
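
update_sg_wakeup_stats() runs the same accumulation at wakeup time, but "without" the waking task p: each cpu_*_without() helper subtracts p's contribution on the CPU where it is currently accounted, and sum_h_nr_running is decremented likewise (the "- local" at 9361). A sketch with hypothetical flat arrays in place of runqueues; the misfit handling at 9351 and 9374-9376 is elided:

    #include <stdio.h>
    #include <string.h>

    struct sg_lb_stats {
        unsigned long group_load, group_util, group_runnable;
        unsigned int  sum_h_nr_running, idle_cpus, group_weight;
    };

    /* Hypothetical per-CPU state standing in for struct rq. */
    struct cpu_state {
        unsigned long load, util, runnable;
        unsigned int  h_nr_running;
    };

    /* Hypothetical task: the CPU it currently counts against, and the
     * PELT contributions to subtract there. */
    struct task {
        int cpu;
        unsigned long load, util, runnable;
    };

    static void update_sg_wakeup_stats(struct sg_lb_stats *sgs,
                                       const struct cpu_state *cpus,
                                       int ncpus, const struct task *p)
    {
        memset(sgs, 0, sizeof(*sgs));                        /* 9347 */

        for (int i = 0; i < ncpus; i++) {
            /* 1 on the CPU where p is currently accounted, else 0. */
            unsigned int local = (i == (unsigned int)p->cpu);

            /* The *_without() reads at 9357-9359, inlined. */
            sgs->group_load     += cpus[i].load     - (local ? p->load : 0);
            sgs->group_util     += cpus[i].util     - (local ? p->util : 0);
            sgs->group_runnable += cpus[i].runnable - (local ? p->runnable : 0);
            sgs->sum_h_nr_running += cpus[i].h_nr_running - local;  /* 9361 */

            if (cpus[i].h_nr_running - local == 0)
                sgs->idle_cpus++;                            /* 9370 */
        }
        sgs->group_weight = ncpus;                           /* 9382 */
    }

    int main(void)
    {
        struct cpu_state cpus[] = { { 500, 400, 450, 2 }, { 0, 0, 0, 0 } };
        struct task p = { .cpu = 0, .load = 300, .util = 250,
                          .runnable = 280 };
        struct sg_lb_stats sgs;

        update_sg_wakeup_stats(&sgs, cpus, 2, &p);
        printf("util without p: %lu, idle cpus: %u\n",
               sgs.group_util, sgs.idle_cpus);
        return 0;
    }
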
9399 struct sg_lb_stats *sgs) in update_pick_idlest() argument
9401 if (sgs->group_type < idlest_sgs->group_type) in update_pick_idlest()
9404 if (sgs->group_type > idlest_sgs->group_type) in update_pick_idlest()
9412 switch (sgs->group_type) { in update_pick_idlest()
9416 if (idlest_sgs->avg_load <= sgs->avg_load) in update_pick_idlest()
9433 if (idlest_sgs->idle_cpus > sgs->idle_cpus) in update_pick_idlest()
9437 if (idlest_sgs->idle_cpus == sgs->idle_cpus && in update_pick_idlest()
9438 idlest_sgs->group_util <= sgs->group_util) in update_pick_idlest()
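
update_pick_idlest() is the wakeup-side mirror of update_sd_pick_busiest(): a *lower* group_type wins (9401-9404), and equal types break toward lower avg_load, then more idle CPUs, then lower utilization (9412-9438). A condensed sketch; the misfit case (highest max capacity) and the never-selected imbalanced/asym types are elided:

    #include <stdbool.h>
    #include <stdio.h>

    enum group_type {
        group_has_spare, group_fully_busy, group_misfit_task,
        group_asym_packing, group_imbalanced, group_overloaded,
    };

    struct sg_lb_stats {
        enum group_type group_type;
        unsigned long   avg_load, group_util;
        unsigned int    idle_cpus;
    };

    static bool update_pick_idlest(const struct sg_lb_stats *idlest_sgs,
                                   const struct sg_lb_stats *sgs)
    {
        if (sgs->group_type < idlest_sgs->group_type)
            return true;
        if (sgs->group_type > idlest_sgs->group_type)
            return false;

        switch (sgs->group_type) {
        case group_overloaded:
        case group_fully_busy:
            /* Lower avg_load wins (9416). */
            return idlest_sgs->avg_load > sgs->avg_load;
        case group_has_spare:
            /* More idle CPUs wins; equal idle CPUs fall back to
             * lower utilization (9433-9438). */
            if (idlest_sgs->idle_cpus > sgs->idle_cpus)
                return false;
            return !(idlest_sgs->idle_cpus == sgs->idle_cpus &&
                     idlest_sgs->group_util <= sgs->group_util);
        default:
            return false;
        }
    }

    int main(void)
    {
        struct sg_lb_stats cur  = { group_has_spare, 0, 500, 2 };
        struct sg_lb_stats cand = { group_has_spare, 0, 300, 2 };
        printf("candidate is idler: %d\n",
               update_pick_idlest(&cur, &cand));
        return 0;
    }
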
9469 struct sg_lb_stats *sgs; in find_idlest_group() local
9492 sgs = &local_sgs; in find_idlest_group()
9495 sgs = &tmp_sgs; in find_idlest_group()
9498 update_sg_wakeup_stats(sd, group, sgs, p); in find_idlest_group()
9500 if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) { in find_idlest_group()
9502 idlest_sgs = *sgs; in find_idlest_group()
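
find_idlest_group() drives the scan: statistics for the local group (the one holding the waking CPU) land in local_sgs (9492) so they can be compared against the winner afterwards, every other group is computed into the scratch tmp_sgs (9495), and a winning candidate's stats are snapshotted by value at 9502 because tmp_sgs is reused on the next iteration. A toy skeleton of that loop; the group layout, the helpers, and the idle-CPU-only comparison are all stand-ins for the real ones sketched above:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct sg_lb_stats { unsigned int idle_cpus; };

    struct sched_group {
        int contains_this_cpu;    /* stand-in for the cpumask test */
        unsigned int idle_cpus;   /* toy data the stats come from */
        struct sched_group *next; /* groups form a circular list */
    };

    /* Toy stand-ins for the real helpers. */
    static void update_sg_wakeup_stats(struct sg_lb_stats *sgs,
                                       const struct sched_group *g)
    {
        memset(sgs, 0, sizeof(*sgs));
        sgs->idle_cpus = g->idle_cpus;
    }

    static bool update_pick_idlest(const struct sg_lb_stats *idlest_sgs,
                                   const struct sg_lb_stats *sgs)
    {
        return sgs->idle_cpus > idlest_sgs->idle_cpus;
    }

    static struct sched_group *find_idlest_group(struct sched_group *first)
    {
        struct sg_lb_stats local_sgs, tmp_sgs;
        struct sg_lb_stats idlest_sgs = { 0 };
        struct sched_group *idlest = NULL, *local = NULL, *group = first;

        do {
            /* Local group stats are kept separately (9488-9495). */
            struct sg_lb_stats *sgs =
                group->contains_this_cpu ? &local_sgs : &tmp_sgs;

            if (group->contains_this_cpu)
                local = group;

            update_sg_wakeup_stats(sgs, group);           /* 9498 */

            if (!group->contains_this_cpu &&
                update_pick_idlest(&idlest_sgs, sgs)) {   /* 9500 */
                idlest = group;
                idlest_sgs = *sgs;  /* snapshot: tmp_sgs is recycled, 9502 */
            }
            group = group->next;
        } while (group != first);

        (void)local;  /* the kernel compares local vs idlest here */
        return idlest;
    }

    int main(void)
    {
        struct sched_group g2 = { 0, 3, NULL }, g1 = { 1, 1, &g2 };
        g2.next = &g1;
        printf("idlest has %u idle cpus\n",
               find_idlest_group(&g1)->idle_cpus);
        return 0;
    }
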
9700 struct sg_lb_stats *sgs = &tmp_sgs; in update_sd_lb_stats() local
9706 sgs = local; in update_sd_lb_stats()
9713 update_sg_lb_stats(env, sg, sgs, &sg_status); in update_sd_lb_stats()
9719 if (update_sd_pick_busiest(env, sds, sg, sgs)) { in update_sd_lb_stats()
9721 sds->busiest_stat = *sgs; in update_sd_lb_stats()
9726 sds->total_load += sgs->group_load; in update_sd_lb_stats()
9727 sds->total_capacity += sgs->group_capacity; in update_sd_lb_stats()
9729 sum_util += sgs->group_util; in update_sd_lb_stats()
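
update_sd_lb_stats() applies the same local-versus-scratch pattern one level up (9700-9713), snapshots the busiest group's stats by value (9721), and accumulates the domain-wide totals later used to compute the imbalance (9726-9729). A toy skeleton with a deliberately trivial "busiest" test standing in for the real update_sd_pick_busiest():

    #include <stdio.h>

    struct sg_lb_stats {
        unsigned long group_load, group_capacity, group_util;
    };

    struct sched_group {
        int is_local;               /* contains the balancing CPU */
        struct sg_lb_stats stats;   /* toy: precomputed per-group stats */
        struct sched_group *next;
    };

    struct sd_lb_stats {
        struct sched_group *busiest;
        struct sg_lb_stats busiest_stat, local_stat;
        unsigned long total_load, total_capacity;
    };

    static void update_sd_lb_stats(struct sd_lb_stats *sds,
                                   struct sched_group *first)
    {
        struct sg_lb_stats tmp_sgs;
        struct sched_group *sg = first;
        unsigned long sum_util = 0;

        do {
            /* Local group stats live in sds directly (9706); every
             * other group goes through the scratch buffer (9700). */
            struct sg_lb_stats *sgs = sg->is_local ? &sds->local_stat
                                                   : &tmp_sgs;
            *sgs = sg->stats;  /* stands in for update_sg_lb_stats, 9713 */

            /* Toy busiest test: highest raw load wins. */
            if (!sg->is_local &&
                sgs->group_load > sds->busiest_stat.group_load) {
                sds->busiest = sg;
                sds->busiest_stat = *sgs;  /* snapshot by value, 9721 */
            }

            sds->total_load     += sgs->group_load;      /* 9726 */
            sds->total_capacity += sgs->group_capacity;  /* 9727 */
            sum_util            += sgs->group_util;      /* 9729 */

            sg = sg->next;
        } while (sg != first);

        (void)sum_util;  /* the kernel consumes this after the loop */
    }

    int main(void)
    {
        struct sched_group g2 = { 0, { 900, 1024, 700 }, NULL };
        struct sched_group g1 = { 1, { 400, 1024, 300 }, &g2 };
        struct sd_lb_stats sds = { 0 };

        g2.next = &g1;
        update_sd_lb_stats(&sds, &g1);
        printf("busiest load %lu, total load %lu, total capacity %lu\n",
               sds.busiest_stat.group_load, sds.total_load,
               sds.total_capacity);
        return 0;
    }
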