Lines matching refs:sds in kernel/sched/fair.c
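Identifier cross-reference for `sds` in kernel/sched/fair.c (an Android common kernel tree, per the trace_android_rvh_* vendor hook at 10293). The name covers two unrelated locals: a `struct sched_domain_shared *`, the per-LLC shared state (set_idle_cores(), test_idle_cores(), nohz_balancer_kick()), and a `struct sd_lb_stats *`, the per-balance-pass statistics threaded through find_busiest_group() and its helpers. After each group of matches below, a reconstructed sketch of the surrounding function is given; the sketches follow mainline fair.c of the same era and are illustrative, not verbatim source.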
6661 struct sched_domain_shared *sds; in set_idle_cores() local
6663 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); in set_idle_cores()
6664 if (sds) in set_idle_cores()
6665 WRITE_ONCE(sds->has_idle_cores, val); in set_idle_cores()
6670 struct sched_domain_shared *sds; in test_idle_cores() local
6672 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); in test_idle_cores()
6673 if (sds) in test_idle_cores()
6674 return READ_ONCE(sds->has_idle_cores); in test_idle_cores()
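The first two matches are the accessors for the per-LLC has_idle_cores hint. A minimal sketch, assuming the mainline signatures (the bool `def` fallback of test_idle_cores() is not visible in the matches and is an assumption):

static inline void set_idle_cores(int cpu, int val)
{
	struct sched_domain_shared *sds;

	/* sd_llc_shared: per-CPU pointer to this CPU's LLC shared state. */
	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
	if (sds)
		WRITE_ONCE(sds->has_idle_cores, val);
}

static inline bool test_idle_cores(int cpu, bool def)
{
	struct sched_domain_shared *sds;

	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
	if (sds)
		return READ_ONCE(sds->has_idle_cores);

	/* No LLC shared state (e.g. domains being rebuilt): caller's default. */
	return def;
}

The WRITE_ONCE()/READ_ONCE() pair keeps the hint tear-free without locking; select_idle_cpu() reads it to skip the idle-core scan when no fully idle core is known to exist.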
8978 static inline void init_sd_lb_stats(struct sd_lb_stats *sds) in init_sd_lb_stats() argument
8987 *sds = (struct sd_lb_stats){ in init_sd_lb_stats()
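Match 8987 is a compound-literal assignment that resets the whole stats block at the start of a balance pass. A sketch assuming the mainline field set:

static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
{
	/*
	 * Skimp on the clearing: local_stat is overwritten wholesale by
	 * update_sg_lb_stats(), so only fields read before that point need
	 * defined values here.
	 */
	*sds = (struct sd_lb_stats){
		.busiest = NULL,
		.local = NULL,
		.total_load = 0UL,
		.total_capacity = 0UL,
		.busiest_stat = {
			.idle_cpus = UINT_MAX,
			.group_type = group_has_spare,
		},
	};
}

Seeding busiest_stat with the weakest possible classification (group_has_spare, every CPU idle) guarantees the first real candidate compared in update_sd_pick_busiest() wins.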
9262 static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds, in asym_smt_can_pull_tasks() argument
9270 local_is_smt = sds->local->flags & SD_SHARE_CPUCAPACITY; in asym_smt_can_pull_tasks()
9296 int local_busy_cpus = sds->local->group_weight - in asym_smt_can_pull_tasks()
9297 sds->local_stat.idle_cpus; in asym_smt_can_pull_tasks()
9311 if (!sds->local_stat.sum_nr_running) in asym_smt_can_pull_tasks()
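Matches 9262-9311 come from the SMT-aware ASYM_PACKING helper. A condensed sketch assuming the mainline v5.16-era logic; the CONFIG_SCHED_SMT guard is dropped and branches not visible in the matches are marked as elided:

static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds,
				    struct sg_lb_stats *sgs,
				    struct sched_group *group)
{
	bool local_is_smt = sds->local->flags & SD_SHARE_CPUCAPACITY;
	bool sg_is_smt = group->flags & SD_SHARE_CPUCAPACITY;
	int sg_busy_cpus = sgs->group_weight - sgs->idle_cpus;

	if (!local_is_smt) {
		/* A non-SMT destination can always help a busy SMT core. */
		if (sg_busy_cpus >= 2)
			return true;
		return sched_asym_prefer(dst_cpu, group->asym_prefer_cpu);
	}

	if (sg_is_smt) {
		/* SMT vs SMT: only pull when it evens out busy siblings. */
		int local_busy_cpus = sds->local->group_weight -
				      sds->local_stat.idle_cpus;

		if (sg_busy_cpus - local_busy_cpus == 1)
			return sched_asym_prefer(dst_cpu, group->asym_prefer_cpu);

		return false;
	}

	/*
	 * Non-SMT candidate group: only pull onto our SMT core when the
	 * whole local group is idle, so we never end up with two busy
	 * siblings (match at 9311).
	 */
	if (!sds->local_stat.sum_nr_running)
		return sched_asym_prefer(dst_cpu, group->asym_prefer_cpu);

	return false;
}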
9322 sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs, in sched_asym() argument
9326 if ((sds->local->flags & SD_SHARE_CPUCAPACITY) || in sched_asym()
9328 return asym_smt_can_pull_tasks(env->dst_cpu, sds, sgs, group); in sched_asym()
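sched_asym() at 9322 routes to the helper above only when SMT is involved on either side; otherwise it falls back to a plain CPU-priority comparison. A sketch:

static inline bool
sched_asym(struct lb_env *env, struct sd_lb_stats *sds,
	   struct sg_lb_stats *sgs, struct sched_group *group)
{
	/* Only do the SMT checks if either side has SMT siblings. */
	if ((sds->local->flags & SD_SHARE_CPUCAPACITY) ||
	    (group->flags & SD_SHARE_CPUCAPACITY))
		return asym_smt_can_pull_tasks(env->dst_cpu, sds, sgs, group);

	return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
}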
9355 struct sd_lb_stats *sds, in update_sg_lb_stats() argument
9364 local_group = group == sds->local; in update_sg_lb_stats()
9421 sched_asym(env, sds, sgs, group)) { in update_sg_lb_stats()
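In update_sg_lb_stats() the two sds uses are the local-group test and the asym-packing classification. A fragment sketch with the per-CPU accumulation loop elided:

static inline void update_sg_lb_stats(struct lb_env *env,
				      struct sd_lb_stats *sds,
				      struct sched_group *group,
				      struct sg_lb_stats *sgs,
				      int *sg_status)
{
	int local_group = group == sds->local;	/* match at 9364 */

	/* ... loop over sched_group_span(group): accumulate group_load,
	 *     group_util, sum_nr_running, idle_cpus, misfit load ... */

	/* Check if dst CPU is idle and preferred to this group (9421). */
	if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
	    env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
	    sched_asym(env, sds, sgs, group))
		sgs->group_asym_packing = 1;

	/* ... group_capacity / group_type classification ... */
}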
9447 struct sd_lb_stats *sds, in update_sd_pick_busiest() argument
9451 struct sg_lb_stats *busiest = &sds->busiest_stat; in update_sd_pick_busiest()
9466 sds->local_stat.group_type != group_has_spare)) in update_sd_pick_busiest()
9496 if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu)) in update_sd_pick_busiest()
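update_sd_pick_busiest() decides whether candidate group sg should replace the current sds->busiest. A condensed sketch of the matched paths, with most tie-breaks elided:

static bool update_sd_pick_busiest(struct lb_env *env,
				   struct sd_lb_stats *sds,
				   struct sched_group *sg,
				   struct sg_lb_stats *sgs)
{
	struct sg_lb_stats *busiest = &sds->busiest_stat;

	/* ... bail out if the candidate has no tasks to pull ... */

	/*
	 * Match at 9466: don't pick a group for its misfit task if the
	 * destination CPU can't fit it and the local group has no spare
	 * capacity either.
	 */
	if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
	    (sgs->group_type == group_misfit_task) &&
	    (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) ||
	     sds->local_stat.group_type != group_has_spare))
		return false;

	if (sgs->group_type > busiest->group_type)
		return true;
	if (sgs->group_type < busiest->group_type)
		return false;

	/* Same group_type: type-specific tie-breaks. */
	switch (sgs->group_type) {
	case group_asym_packing:
		/* Match at 9496: prefer pulling from lower-priority CPUs. */
		if (sched_asym_prefer(sg->asym_prefer_cpu,
				      sds->busiest->asym_prefer_cpu))
			return false;
		break;
	default:
		/* ... load / utilization / idle-cpu tie-breaks elided ... */
		break;
	}

	return true;
}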
9994 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) in update_sd_lb_stats() argument
9998 struct sg_lb_stats *local = &sds->local_stat; in update_sd_lb_stats()
10009 sds->local = sg; in update_sd_lb_stats()
10017 update_sg_lb_stats(env, sds, sg, sgs, &sg_status); in update_sd_lb_stats()
10023 if (update_sd_pick_busiest(env, sds, sg, sgs)) { in update_sd_lb_stats()
10024 sds->busiest = sg; in update_sd_lb_stats()
10025 sds->busiest_stat = *sgs; in update_sd_lb_stats()
10030 sds->total_load += sgs->group_load; in update_sd_lb_stats()
10031 sds->total_capacity += sgs->group_capacity; in update_sd_lb_stats()
10038 sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING; in update_sd_lb_stats()
10042 env->fbq_type = fbq_classify_group(&sds->busiest_stat); in update_sd_lb_stats()
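update_sd_lb_stats() is the producer side: one pass over every group at this domain level, filling sds as it goes. A sketch assuming the mainline loop structure:

static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
{
	struct sched_domain *child = env->sd->child;
	struct sched_group *sg = env->sd->groups;
	struct sg_lb_stats *local = &sds->local_stat;
	struct sg_lb_stats tmp_sgs;
	int sg_status = 0;

	do {
		struct sg_lb_stats *sgs = &tmp_sgs;
		int local_group;

		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
		if (local_group) {
			sds->local = sg;	/* match at 10009 */
			sgs = local;

			if (env->idle != CPU_NEWLY_IDLE ||
			    time_after_eq(jiffies, sg->sgc->next_update))
				update_group_capacity(env->sd, env->dst_cpu);
		}

		update_sg_lb_stats(env, sds, sg, sgs, &sg_status);

		if (local_group)
			goto next_group;

		/* Matches at 10023-10025: remember the busiest candidate. */
		if (update_sd_pick_busiest(env, sds, sg, sgs)) {
			sds->busiest = sg;
			sds->busiest_stat = *sgs;
		}

next_group:
		/* Domain-wide totals (matches at 10030-10031). */
		sds->total_load += sgs->group_load;
		sds->total_capacity += sgs->group_capacity;

		sg = sg->next;
	} while (sg != env->sd->groups);

	/* Tag the domain if the child domain prefers siblings first. */
	sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING;

	if (env->sd->flags & SD_NUMA)
		env->fbq_type = fbq_classify_group(&sds->busiest_stat);

	/* ... nohz / root-domain overload bookkeeping elided ... */
}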
10069 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) in calculate_imbalance() argument
10073 local = &sds->local_stat; in calculate_imbalance()
10074 busiest = &sds->busiest_stat; in calculate_imbalance()
10148 if (busiest->group_weight == 1 || sds->prefer_sibling) { in calculate_imbalance()
10205 sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) / in calculate_imbalance()
10206 sds->total_capacity; in calculate_imbalance()
10212 if (local->avg_load >= sds->avg_load) { in calculate_imbalance()
10229 (busiest->avg_load - sds->avg_load) * busiest->group_capacity, in calculate_imbalance()
10230 (sds->avg_load - local->avg_load) * local->group_capacity in calculate_imbalance()
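calculate_imbalance() converts the gathered stats into env->imbalance. The matched lines cover the overloaded end of the decision tree; a sketch of that path, with the earlier misfit/asym/spare-capacity cases elided:

static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
{
	struct sg_lb_stats *local = &sds->local_stat;
	struct sg_lb_stats *busiest = &sds->busiest_stat;

	/* ... misfit / asym-packing / fully-busy cases elided ... */

	if (busiest->group_type == group_has_spare) {
		/*
		 * Match at 10148: single-CPU groups, or domains whose child
		 * sets SD_PREFER_SIBLING, spread by task count, not load.
		 */
		if (busiest->group_weight == 1 || sds->prefer_sibling) {
			/* ... nr_running-based imbalance elided ... */
		}
		return;
	}

	/* Both groups are (or will be) overloaded: balance toward the
	 * domain-wide average load. */
	if (local->group_type < group_overloaded) {
		local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) /
				  local->group_capacity;

		sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
				sds->total_capacity;	/* matches 10205-10206 */

		/* Match at 10212: local already at/above average, don't pull. */
		if (local->avg_load >= sds->avg_load) {
			env->imbalance = 0;
			return;
		}
	}

	/*
	 * Don't push the local group above the average, and don't pull the
	 * busiest group below it: take the smaller of the two deltas,
	 * weighted by each group's capacity.
	 */
	env->imbalance = min(
		(busiest->avg_load - sds->avg_load) * busiest->group_capacity,
		(sds->avg_load - local->avg_load) * local->group_capacity
	) / SCHED_CAPACITY_SCALE;
}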
10269 struct sd_lb_stats sds; in find_busiest_group() local
10271 init_sd_lb_stats(&sds); in find_busiest_group()
10277 update_sd_lb_stats(env, &sds); in find_busiest_group()
10280 if (!sds.busiest) in find_busiest_group()
10283 busiest = &sds.busiest_stat; in find_busiest_group()
10293 trace_android_rvh_find_busiest_group(sds.busiest, env->dst_rq, in find_busiest_group()
10312 local = &sds.local_stat; in find_busiest_group()
10333 sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) / in find_busiest_group()
10334 sds.total_capacity; in find_busiest_group()
10340 if (local->avg_load >= sds.avg_load) in find_busiest_group()
10353 if (sds.prefer_sibling && local->group_type == group_has_spare && in find_busiest_group()
10388 calculate_imbalance(env, &sds); in find_busiest_group()
10389 return env->imbalance ? sds.busiest : NULL; in find_busiest_group()
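find_busiest_group() owns the sd_lb_stats instance for the whole balance pass; everything above fills or reads &sds. A condensed sketch of the flow, including where the Android restricted vendor hook at 10293 sits (its argument list is elided in the listing and therefore here too):

static struct sched_group *find_busiest_group(struct lb_env *env)
{
	struct sg_lb_stats *local, *busiest;
	struct sd_lb_stats sds;

	init_sd_lb_stats(&sds);

	/* Fill sds: one pass over all groups at this domain level. */
	update_sd_lb_stats(env, &sds);

	/* There is no busy sibling group to pull tasks from. */
	if (!sds.busiest)
		goto out_balanced;

	busiest = &sds.busiest_stat;

	/* Match at 10293: trace_android_rvh_find_busiest_group() may let a
	 * vendor module veto or adjust the decision here. */

	local = &sds.local_stat;

	/* ... group_type comparisons between local and busiest elided ... */

	if (local->group_type < group_overloaded) {
		/* Matches at 10333-10340: already above the domain average,
		 * nothing to gain from pulling. */
		sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) /
			       sds.total_capacity;
		if (local->avg_load >= sds.avg_load)
			goto out_balanced;
	}

	/* Match at 10353: push excess tasks toward sibling domains. */
	if (sds.prefer_sibling && local->group_type == group_has_spare &&
	    busiest->sum_nr_running > local->sum_nr_running + 1)
		goto force_balance;

	/* ... remaining has_spare / imbalanced checks elided ... */

force_balance:
	calculate_imbalance(env, &sds);
	return env->imbalance ? sds.busiest : NULL;

out_balanced:
	env->imbalance = 0;
	return NULL;
}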
11246 struct sched_domain_shared *sds; in nohz_balancer_kick() local
11335 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); in nohz_balancer_kick()
11336 if (sds) { in nohz_balancer_kick()
11346 nr_busy = atomic_read(&sds->nr_busy_cpus); in nohz_balancer_kick()
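The last matches come from the nohz idle-balance kick path, where sds is again the LLC shared state; nr_busy_cpus is maintained as CPUs enter and leave the tick. A sketch of the relevant fragment, assuming the mainline surroundings:

static void nohz_balancer_kick(struct rq *rq)
{
	unsigned int flags = 0;
	struct sched_domain_shared *sds;
	int nr_busy, cpu = rq->cpu;

	/* ... fast-path bail-outs and rq-overload checks elided ... */

	rcu_read_lock();

	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
	if (sds) {
		/*
		 * Match at 11346: more than one busy CPU in this LLC means
		 * load could be spread within it (e.g. packed SMT cores
		 * while other cores idle), so kick an idle CPU to balance.
		 */
		nr_busy = atomic_read(&sds->nr_busy_cpus);
		if (nr_busy > 1) {
			flags = NOHZ_KICK_MASK;
			goto unlock;
		}
	}

	/* ... asym-capacity and asym-packing checks elided ... */
unlock:
	rcu_read_unlock();
	if (flags)
		kick_ilb(flags);
}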