
Lines Matching refs:env (kernel/sched/fair.c)

1788 static void update_numa_stats(struct task_numa_env *env,  in update_numa_stats()  argument
1809 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) in update_numa_stats()
1822 ns->node_type = numa_classify(env->imbalance_pct, ns); in update_numa_stats()
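
The task_numa_* lines in this listing all dereference a struct task_numa_env. As an orientation aid, the sketch below collects the fields the matched lines actually touch; it is reconstructed from this listing, not copied from the kernel, and the real definitions in kernel/sched/fair.c may carry additional members.

/* Field sketch of the NUMA-balancing environment, assembled from the
 * references in this listing only. */
struct task_struct;                     /* opaque here */

enum numa_type {
        node_has_spare,                 /* the only classification visible in the listing */
        /* further classifications (fully busy, overloaded) elided */
};

struct numa_stats {
        unsigned long load;             /* env->{src,dst}_stats.load */
        unsigned long compute_capacity; /* consumed by load_too_imbalanced() */
        unsigned int  nr_running;       /* read by task_numa_find_cpu() */
        int           idle_cpu;         /* preferred idle target, -1 if none */
        enum numa_type node_type;       /* set via numa_classify() */
};

struct task_numa_env {
        struct task_struct *p;          /* task being placed */

        int src_cpu, src_nid;           /* where p currently runs */
        int dst_cpu, dst_nid;           /* candidate destination */
        int dist;                       /* node_distance(src_nid, dst_nid) */

        int imbalance_pct;              /* taken from the NUMA sched_domain */
        int imb_numa_nr;                /* tolerated NUMA task imbalance */

        struct numa_stats src_stats, dst_stats;

        int best_cpu;                   /* best placement found so far, -1 if none */
        long best_imp;                  /* improvement score of that placement */
        struct task_struct *best_task;  /* task to swap with, or NULL to just move p */
};
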
1828 static void task_numa_assign(struct task_numa_env *env, in task_numa_assign() argument
1831 struct rq *rq = cpu_rq(env->dst_cpu); in task_numa_assign()
1834 if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) { in task_numa_assign()
1836 int start = env->dst_cpu; in task_numa_assign()
1839 for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start) { in task_numa_assign()
1840 if (cpu == env->best_cpu || !idle_cpu(cpu) || in task_numa_assign()
1841 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) { in task_numa_assign()
1845 env->dst_cpu = cpu; in task_numa_assign()
1846 rq = cpu_rq(env->dst_cpu); in task_numa_assign()
1860 if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) { in task_numa_assign()
1861 rq = cpu_rq(env->best_cpu); in task_numa_assign()
1865 if (env->best_task) in task_numa_assign()
1866 put_task_struct(env->best_task); in task_numa_assign()
1870 env->best_task = p; in task_numa_assign()
1871 env->best_imp = imp; in task_numa_assign()
1872 env->best_cpu = env->dst_cpu; in task_numa_assign()
1876 struct task_numa_env *env) in load_too_imbalanced() argument
1889 src_capacity = env->src_stats.compute_capacity; in load_too_imbalanced()
1890 dst_capacity = env->dst_stats.compute_capacity; in load_too_imbalanced()
1894 orig_src_load = env->src_stats.load; in load_too_imbalanced()
1895 orig_dst_load = env->dst_stats.load; in load_too_imbalanced()
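
The load_too_imbalanced() fragments above read the per-node compute capacities and the pre-move loads. Below is a minimal user-space sketch of that check, assuming the upstream approach of cross-scaling each load by the other node's capacity and rejecting a move only if the resulting imbalance grows; the env pointer is flattened into plain parameters, and the formula itself is an assumption since it is not part of the matched lines.

#include <stdio.h>
#include <stdlib.h>

static int load_too_imbalanced(long src_load, long dst_load,
                               long orig_src_load, long orig_dst_load,
                               long src_capacity, long dst_capacity)
{
        /* capacity-scaled imbalance after the proposed move */
        long imb = labs(dst_load * src_capacity - src_load * dst_capacity);
        /* capacity-scaled imbalance before the move */
        long old_imb = labs(orig_dst_load * src_capacity -
                            orig_src_load * dst_capacity);

        return imb > old_imb;           /* reject only if things get worse */
}

int main(void)
{
        /* moving 256 units of load from a loaded node to a lighter one */
        printf("%d\n", load_too_imbalanced(1024 - 256, 512 + 256,
                                           1024, 512, 1024, 1024)); /* 0: allowed */
        return 0;
}

task_numa_find_cpu(), further down, feeds the same check with the source and destination loads shifted by task_h_load(p) to compute its maymove flag.
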
1916 static bool task_numa_compare(struct task_numa_env *env, in task_numa_compare() argument
1919 struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p); in task_numa_compare()
1920 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
1924 int dist = env->dist; in task_numa_compare()
1941 if (cur == env->p) { in task_numa_compare()
1947 if (maymove && moveimp >= env->best_imp) in task_numa_compare()
1954 if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr)) in task_numa_compare()
1961 if (env->best_task && in task_numa_compare()
1962 env->best_task->numa_preferred_nid == env->src_nid && in task_numa_compare()
1963 cur->numa_preferred_nid != env->src_nid) { in task_numa_compare()
1985 if (env->dst_stats.node_type == node_has_spare) in task_numa_compare()
1988 imp = taskimp + task_weight(cur, env->src_nid, dist) - in task_numa_compare()
1989 task_weight(cur, env->dst_nid, dist); in task_numa_compare()
2002 imp += group_weight(cur, env->src_nid, dist) - in task_numa_compare()
2003 group_weight(cur, env->dst_nid, dist); in task_numa_compare()
2005 imp += task_weight(cur, env->src_nid, dist) - in task_numa_compare()
2006 task_weight(cur, env->dst_nid, dist); in task_numa_compare()
2010 if (cur->numa_preferred_nid == env->dst_nid) in task_numa_compare()
2019 if (cur->numa_preferred_nid == env->src_nid) in task_numa_compare()
2022 if (maymove && moveimp > imp && moveimp > env->best_imp) { in task_numa_compare()
2032 if (env->best_task && cur->numa_preferred_nid == env->src_nid && in task_numa_compare()
2033 env->best_task->numa_preferred_nid != env->src_nid) { in task_numa_compare()
2043 if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2) in task_numa_compare()
2049 load = task_h_load(env->p) - task_h_load(cur); in task_numa_compare()
2053 dst_load = env->dst_stats.load + load; in task_numa_compare()
2054 src_load = env->src_stats.load - load; in task_numa_compare()
2056 if (load_too_imbalanced(src_load, dst_load, env)) in task_numa_compare()
2062 int cpu = env->dst_stats.idle_cpu; in task_numa_compare()
2066 cpu = env->dst_cpu; in task_numa_compare()
2072 if (!idle_cpu(cpu) && env->best_cpu >= 0 && in task_numa_compare()
2073 idle_cpu(env->best_cpu)) { in task_numa_compare()
2074 cpu = env->best_cpu; in task_numa_compare()
2077 env->dst_cpu = cpu; in task_numa_compare()
2080 task_numa_assign(env, cur, imp); in task_numa_compare()
2087 if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu)) in task_numa_compare()
2094 if (!maymove && env->best_task && in task_numa_compare()
2095 env->best_task->numa_preferred_nid == env->src_nid) { in task_numa_compare()
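
Within task_numa_compare(), the SMALLIMP comparison above decides whether a swap candidate is worth recording at all. A self-contained restatement of that two-part test follows; the value 30 for SMALLIMP is an assumption taken from upstream and does not appear in the matched lines.

#include <stdbool.h>
#include <stdio.h>

#define SMALLIMP 30     /* assumed upstream value */

static bool worth_considering(long imp, long best_imp)
{
        /* must clear an absolute floor AND beat the current best by a margin */
        return imp >= SMALLIMP && imp > best_imp + SMALLIMP / 2;
}

int main(void)
{
        printf("%d\n", worth_considering(40, 20)); /* 1: clears both tests */
        printf("%d\n", worth_considering(40, 30)); /* 0: not enough better than best */
        return 0;
}

Per the upstream comments, the margin requirement is there to avoid churning between nearly equivalent placements.
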
2104 static void task_numa_find_cpu(struct task_numa_env *env, in task_numa_find_cpu() argument
2114 if (env->dst_stats.node_type == node_has_spare) { in task_numa_find_cpu()
2124 src_running = env->src_stats.nr_running - 1; in task_numa_find_cpu()
2125 dst_running = env->dst_stats.nr_running + 1; in task_numa_find_cpu()
2128 env->imb_numa_nr); in task_numa_find_cpu()
2133 if (env->dst_stats.idle_cpu >= 0) { in task_numa_find_cpu()
2134 env->dst_cpu = env->dst_stats.idle_cpu; in task_numa_find_cpu()
2135 task_numa_assign(env, NULL, 0); in task_numa_find_cpu()
2145 load = task_h_load(env->p); in task_numa_find_cpu()
2146 dst_load = env->dst_stats.load + load; in task_numa_find_cpu()
2147 src_load = env->src_stats.load - load; in task_numa_find_cpu()
2148 maymove = !load_too_imbalanced(src_load, dst_load, env); in task_numa_find_cpu()
2151 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { in task_numa_find_cpu()
2153 if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) in task_numa_find_cpu()
2156 env->dst_cpu = cpu; in task_numa_find_cpu()
2157 if (task_numa_compare(env, taskimp, groupimp, maymove)) in task_numa_find_cpu()
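
When the destination node has spare capacity, task_numa_find_cpu() runs the candidate task counts through adjust_numa_imbalance() with env->imb_numa_nr; the same helper reappears under SD_NUMA in calculate_imbalance() near the end of this listing. The sketch below shows the thresholding it is assumed to perform; the constant and the exact conditions are taken from recent upstream kernels, not from the matched lines.

#include <stdio.h>

#define NUMA_IMBALANCE_MIN 2    /* assumed upstream constant */

static long adjust_numa_imbalance(int imbalance, int dst_running, int imb_numa_nr)
{
        /* Once the destination node is busy enough, stop tolerating imbalance. */
        if (dst_running > imb_numa_nr)
                return imbalance;

        /* Otherwise forgive a small task-count imbalance between nodes. */
        if (imbalance <= NUMA_IMBALANCE_MIN)
                return 0;

        return imbalance;
}

int main(void)
{
        printf("%ld\n", adjust_numa_imbalance(2, 1, 4)); /* 0: small imbalance tolerated */
        printf("%ld\n", adjust_numa_imbalance(2, 8, 4)); /* 2: destination already busy */
        return 0;
}
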
2164 struct task_numa_env env = { in task_numa_migrate() local
2192 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); in task_numa_migrate()
2194 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; in task_numa_migrate()
2195 env.imb_numa_nr = sd->imb_numa_nr; in task_numa_migrate()
2210 env.dst_nid = p->numa_preferred_nid; in task_numa_migrate()
2211 dist = env.dist = node_distance(env.src_nid, env.dst_nid); in task_numa_migrate()
2212 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
2213 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
2214 update_numa_stats(&env, &env.src_stats, env.src_nid, false); in task_numa_migrate()
2215 taskimp = task_weight(p, env.dst_nid, dist) - taskweight; in task_numa_migrate()
2216 groupimp = group_weight(p, env.dst_nid, dist) - groupweight; in task_numa_migrate()
2217 update_numa_stats(&env, &env.dst_stats, env.dst_nid, true); in task_numa_migrate()
2220 task_numa_find_cpu(&env, taskimp, groupimp); in task_numa_migrate()
2230 if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) { in task_numa_migrate()
2232 if (nid == env.src_nid || nid == p->numa_preferred_nid) in task_numa_migrate()
2235 dist = node_distance(env.src_nid, env.dst_nid); in task_numa_migrate()
2237 dist != env.dist) { in task_numa_migrate()
2238 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
2239 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
2248 env.dist = dist; in task_numa_migrate()
2249 env.dst_nid = nid; in task_numa_migrate()
2250 update_numa_stats(&env, &env.dst_stats, env.dst_nid, true); in task_numa_migrate()
2251 task_numa_find_cpu(&env, taskimp, groupimp); in task_numa_migrate()
2264 if (env.best_cpu == -1) in task_numa_migrate()
2265 nid = env.src_nid; in task_numa_migrate()
2267 nid = cpu_to_node(env.best_cpu); in task_numa_migrate()
2274 if (env.best_cpu == -1) { in task_numa_migrate()
2275 trace_sched_stick_numa(p, env.src_cpu, NULL, -1); in task_numa_migrate()
2279 best_rq = cpu_rq(env.best_cpu); in task_numa_migrate()
2280 if (env.best_task == NULL) { in task_numa_migrate()
2281 ret = migrate_task_to(p, env.best_cpu); in task_numa_migrate()
2284 trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu); in task_numa_migrate()
2288 ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu); in task_numa_migrate()
2292 trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu); in task_numa_migrate()
2293 put_task_struct(env.best_task); in task_numa_migrate()
8289 static int task_hot(struct task_struct *p, struct lb_env *env) in task_hot() argument
8293 lockdep_assert_rq_held(env->src_rq); in task_hot()
8302 if (env->sd->flags & SD_SHARE_CPUCAPACITY) in task_hot()
8308 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running && in task_hot()
8320 if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p)) in task_hot()
8326 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
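
From task_hot() onward the matches switch to the CFS load balancer and its struct lb_env. As with task_numa_env above, the sketch below only collects the members the matched lines reference, with simplified types so it stands alone; the real definition lives in kernel/sched/fair.c, and src_rq_rf looks like an Android vendor-hook addition rather than an upstream field.

/* Field sketch of struct lb_env, reconstructed from the references below. */
struct sched_domain;
struct rq;
struct rq_flags;
struct cpumask;
struct list_head { struct list_head *next, *prev; };   /* simplified */

enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE };
enum migration_type { migrate_load, migrate_util, migrate_task, migrate_misfit };
enum fbq_type { regular, remote, all };                 /* enumerator names assumed */

struct lb_env {
        struct sched_domain     *sd;            /* domain being balanced */

        struct rq               *src_rq;        /* busiest runqueue (pull source) */
        int                     src_cpu;

        int                     dst_cpu;        /* CPU doing the balancing */
        struct rq               *dst_rq;

        struct cpumask          *dst_grpmask;   /* other CPUs of the local group */
        int                     new_dst_cpu;    /* fallback when dst_cpu is pinned out */
        enum cpu_idle_type      idle;
        long                    imbalance;      /* how much still needs to move */
        struct cpumask          *cpus;          /* CPUs still eligible this pass */

        unsigned int            flags;          /* LBF_* bits, sketched further down */

        unsigned int            loop;           /* tasks scanned so far */
        unsigned int            loop_break;     /* scan budget before taking a break */
        unsigned int            loop_max;

        enum fbq_type           fbq_type;       /* NUMA-aware busiest-queue filter */
        enum migration_type     migration_type; /* what unit the imbalance is counted in */
        struct list_head        tasks;          /* detached tasks awaiting attach */

        struct rq_flags         *src_rq_rf;     /* Android vendor-hook plumbing (assumed) */
};
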
8337 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) in migrate_degrades_locality() argument
8346 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) in migrate_degrades_locality()
8349 src_nid = cpu_to_node(env->src_cpu); in migrate_degrades_locality()
8350 dst_nid = cpu_to_node(env->dst_cpu); in migrate_degrades_locality()
8357 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) in migrate_degrades_locality()
8368 if (env->idle == CPU_IDLE) in migrate_degrades_locality()
8385 struct lb_env *env) in migrate_degrades_locality() argument
8395 int can_migrate_task(struct task_struct *p, struct lb_env *env) in can_migrate_task() argument
8400 lockdep_assert_rq_held(env->src_rq); in can_migrate_task()
8402 trace_android_rvh_can_migrate_task(p, env->dst_cpu, &can_migrate); in can_migrate_task()
8413 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) in can_migrate_task()
8420 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { in can_migrate_task()
8425 env->flags |= LBF_SOME_PINNED; in can_migrate_task()
8437 if (env->idle == CPU_NEWLY_IDLE || in can_migrate_task()
8438 env->flags & (LBF_DST_PINNED | LBF_ACTIVE_LB)) in can_migrate_task()
8442 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { in can_migrate_task()
8444 env->flags |= LBF_DST_PINNED; in can_migrate_task()
8445 env->new_dst_cpu = cpu; in can_migrate_task()
8454 env->flags &= ~LBF_ALL_PINNED; in can_migrate_task()
8456 if (task_on_cpu(env->src_rq, p)) { in can_migrate_task()
8468 if (env->flags & LBF_ACTIVE_LB) in can_migrate_task()
8471 tsk_cache_hot = migrate_degrades_locality(p, env); in can_migrate_task()
8473 tsk_cache_hot = task_hot(p, env); in can_migrate_task()
8476 env->sd->nr_balance_failed > env->sd->cache_nice_tries) { in can_migrate_task()
8478 schedstat_inc(env->sd->lb_hot_gained[env->idle]); in can_migrate_task()
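
can_migrate_task(), detach_tasks() and load_balance() coordinate through env->flags. The bits referenced in this listing are sketched below; the numeric values are an assumption (matching upstream), while the meanings follow from how the matched lines set and test them.

#define LBF_ALL_PINNED  0x01    /* set before scanning; cleared once any task could move */
#define LBF_NEED_BREAK  0x02    /* detach_tasks() exhausted its scan budget, resume later */
#define LBF_DST_PINNED  0x04    /* dst_cpu not allowed, but new_dst_cpu in the group is */
#define LBF_SOME_PINNED 0x08    /* at least one task cannot run on dst_cpu */
#define LBF_ACTIVE_LB   0x10    /* active balance: skip the cache-hotness veto */
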
8491 static void detach_task(struct task_struct *p, struct lb_env *env) in detach_task() argument
8495 lockdep_assert_rq_held(env->src_rq); in detach_task()
8502 trace_android_rvh_migrate_queued_task(env->src_rq, env->src_rq_rf, p, in detach_task()
8503 env->dst_cpu, &detached); in detach_task()
8507 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); in detach_task()
8508 set_task_cpu(p, env->dst_cpu); in detach_task()
8517 static struct task_struct *detach_one_task(struct lb_env *env) in detach_one_task() argument
8521 lockdep_assert_rq_held(env->src_rq); in detach_one_task()
8524 &env->src_rq->cfs_tasks, se.group_node) { in detach_one_task()
8525 if (!can_migrate_task(p, env)) in detach_one_task()
8528 detach_task(p, env); in detach_one_task()
8536 schedstat_inc(env->sd->lb_gained[env->idle]); in detach_one_task()
8548 static int detach_tasks(struct lb_env *env) in detach_tasks() argument
8550 struct list_head *tasks = &env->src_rq->cfs_tasks; in detach_tasks()
8555 lockdep_assert_rq_held(env->src_rq); in detach_tasks()
8561 if (env->src_rq->nr_running <= 1) { in detach_tasks()
8562 env->flags &= ~LBF_ALL_PINNED; in detach_tasks()
8566 if (env->imbalance <= 0) in detach_tasks()
8574 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) in detach_tasks()
8577 env->loop++; in detach_tasks()
8582 if (env->loop > env->loop_max && in detach_tasks()
8583 !(env->flags & LBF_ALL_PINNED)) in detach_tasks()
8587 if (env->loop > env->loop_break) { in detach_tasks()
8588 env->loop_break += SCHED_NR_MIGRATE_BREAK; in detach_tasks()
8589 env->flags |= LBF_NEED_BREAK; in detach_tasks()
8595 if (!can_migrate_task(p, env)) in detach_tasks()
8598 switch (env->migration_type) { in detach_tasks()
8610 load < 16 && !env->sd->nr_balance_failed) in detach_tasks()
8619 if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance) in detach_tasks()
8622 env->imbalance -= load; in detach_tasks()
8628 if (util > env->imbalance) in detach_tasks()
8631 env->imbalance -= util; in detach_tasks()
8635 env->imbalance--; in detach_tasks()
8640 if (task_fits_cpu(p, env->src_cpu)) in detach_tasks()
8643 env->imbalance = 0; in detach_tasks()
8647 detach_task(p, env); in detach_tasks()
8648 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
8658 if (env->idle == CPU_NEWLY_IDLE) in detach_tasks()
8666 if (env->imbalance <= 0) in detach_tasks()
8679 schedstat_add(env->sd->lb_gained[env->idle], detached); in detach_tasks()
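
detach_tasks() treats env->imbalance as a budget whose unit depends on env->migration_type, as the switch above shows. The self-contained sketch below mirrors that accounting; shr_bound() is simplified, and the LB_MIN small-load skip (the "load < 16" line) and the misfit task_fits_cpu() check are omitted for brevity.

#include <stdio.h>

enum migration_type { migrate_load, migrate_util, migrate_task, migrate_misfit };

static long shr_bound(long v, unsigned int shift)
{
        /* right-shift, clamped so the shift count stays within the type width */
        return v >> (shift < 63 ? shift : 63);
}

/* Returns 1 if the task is charged against the remaining imbalance,
 * 0 if it should be skipped as too big for what is left to move. */
static int charge_imbalance(enum migration_type type, long *imbalance,
                            long load, long util, unsigned int nr_balance_failed)
{
        switch (type) {
        case migrate_load:
                /* after repeated failures the load is shifted down, i.e. the
                 * balancer becomes less picky about oversized tasks */
                if (shr_bound(load, nr_balance_failed) > *imbalance)
                        return 0;
                *imbalance -= load;
                return 1;
        case migrate_util:
                if (util > *imbalance)
                        return 0;
                *imbalance -= util;
                return 1;
        case migrate_task:
                (*imbalance)--;         /* imbalance counted in tasks */
                return 1;
        case migrate_misfit:
                *imbalance = 0;         /* one misfit task is enough */
                return 1;
        }
        return 0;
}

int main(void)
{
        long imb = 1024;

        charge_imbalance(migrate_load, &imb, 300, 0, 0);
        printf("remaining: %ld\n", imb);        /* 724 */

        /* with balance failures, a task bigger than the budget is accepted and
         * the budget goes negative, which ends the detach loop */
        charge_imbalance(migrate_load, &imb, 2000, 0, 3);
        printf("remaining: %ld\n", imb);        /* -1276 */
        return 0;
}
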
8714 static void attach_tasks(struct lb_env *env) in attach_tasks() argument
8716 struct list_head *tasks = &env->tasks; in attach_tasks()
8720 rq_lock(env->dst_rq, &rf); in attach_tasks()
8721 update_rq_clock(env->dst_rq); in attach_tasks()
8727 attach_task(env->dst_rq, p); in attach_tasks()
8730 rq_unlock(env->dst_rq, &rf); in attach_tasks()
9322 sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs, in sched_asym() argument
9328 return asym_smt_can_pull_tasks(env->dst_cpu, sds, sgs, group); in sched_asym()
9330 return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu); in sched_asym()
9354 static inline void update_sg_lb_stats(struct lb_env *env, in update_sg_lb_stats() argument
9366 for_each_cpu_and(i, sched_group_span(group), env->cpus) { in update_sg_lb_stats()
9400 if (env->sd->flags & SD_ASYM_CPUCAPACITY) { in update_sg_lb_stats()
9406 } else if ((env->idle != CPU_NOT_IDLE) && in update_sg_lb_stats()
9407 sched_reduced_capacity(rq, env->sd)) { in update_sg_lb_stats()
9419 if (!local_group && env->sd->flags & SD_ASYM_PACKING && in update_sg_lb_stats()
9420 env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running && in update_sg_lb_stats()
9421 sched_asym(env, sds, sgs, group)) { in update_sg_lb_stats()
9425 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs); in update_sg_lb_stats()
9446 static bool update_sd_pick_busiest(struct lb_env *env, in update_sd_pick_busiest() argument
9463 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) && in update_sd_pick_busiest()
9465 (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) || in update_sd_pick_busiest()
9547 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) && in update_sd_pick_busiest()
9549 (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu)))) in update_sd_pick_busiest()
9917 static void update_idle_cpu_scan(struct lb_env *env, in update_idle_cpu_scan() argument
9931 if (!sched_feat(SIS_UTIL) || env->idle == CPU_NEWLY_IDLE) in update_idle_cpu_scan()
9934 llc_weight = per_cpu(sd_llc_size, env->dst_cpu); in update_idle_cpu_scan()
9935 if (env->sd->span_weight != llc_weight) in update_idle_cpu_scan()
9938 sd_share = rcu_dereference(per_cpu(sd_llc_shared, env->dst_cpu)); in update_idle_cpu_scan()
9975 pct = env->sd->imbalance_pct; in update_idle_cpu_scan()
9994 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) in update_sd_lb_stats() argument
9996 struct sched_domain *child = env->sd->child; in update_sd_lb_stats()
9997 struct sched_group *sg = env->sd->groups; in update_sd_lb_stats()
10007 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg)); in update_sd_lb_stats()
10012 if (env->idle != CPU_NEWLY_IDLE || in update_sd_lb_stats()
10014 update_group_capacity(env->sd, env->dst_cpu); in update_sd_lb_stats()
10017 update_sg_lb_stats(env, sds, sg, sgs, &sg_status); in update_sd_lb_stats()
10023 if (update_sd_pick_busiest(env, sds, sg, sgs)) { in update_sd_lb_stats()
10035 } while (sg != env->sd->groups); in update_sd_lb_stats()
10041 if (env->sd->flags & SD_NUMA) in update_sd_lb_stats()
10042 env->fbq_type = fbq_classify_group(&sds->busiest_stat); in update_sd_lb_stats()
10044 if (!env->sd->parent) { in update_sd_lb_stats()
10045 struct root_domain *rd = env->dst_rq->rd; in update_sd_lb_stats()
10054 struct root_domain *rd = env->dst_rq->rd; in update_sd_lb_stats()
10060 update_idle_cpu_scan(env, sum_util); in update_sd_lb_stats()
10069 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) in calculate_imbalance() argument
10077 if (env->sd->flags & SD_ASYM_CPUCAPACITY) { in calculate_imbalance()
10079 env->migration_type = migrate_misfit; in calculate_imbalance()
10080 env->imbalance = 1; in calculate_imbalance()
10086 env->migration_type = migrate_load; in calculate_imbalance()
10087 env->imbalance = busiest->group_misfit_task_load; in calculate_imbalance()
10097 env->migration_type = migrate_task; in calculate_imbalance()
10098 env->imbalance = busiest->sum_h_nr_running; in calculate_imbalance()
10109 env->migration_type = migrate_task; in calculate_imbalance()
10110 env->imbalance = 1; in calculate_imbalance()
10120 !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) { in calculate_imbalance()
10129 env->migration_type = migrate_util; in calculate_imbalance()
10130 env->imbalance = max(local->group_capacity, local->group_util) - in calculate_imbalance()
10140 if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) { in calculate_imbalance()
10141 env->migration_type = migrate_task; in calculate_imbalance()
10142 env->imbalance = 1; in calculate_imbalance()
10154 env->migration_type = migrate_task; in calculate_imbalance()
10156 env->imbalance = nr_diff; in calculate_imbalance()
10163 env->migration_type = migrate_task; in calculate_imbalance()
10164 env->imbalance = max_t(long, 0, in calculate_imbalance()
10170 if (env->sd->flags & SD_NUMA) { in calculate_imbalance()
10171 env->imbalance = adjust_numa_imbalance(env->imbalance, in calculate_imbalance()
10173 env->sd->imb_numa_nr); in calculate_imbalance()
10178 env->imbalance >>= 1; in calculate_imbalance()
10201 env->imbalance = 0; in calculate_imbalance()
10213 env->imbalance = 0; in calculate_imbalance()
10227 env->migration_type = migrate_load; in calculate_imbalance()
10228 env->imbalance = min( in calculate_imbalance()
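
In the migrate_util branch above, the imbalance is the local group's spare capacity; the subtrahend is cut off in the listing and is assumed to be local->group_util, as upstream. A small sketch of that arithmetic:

#include <stdio.h>

static unsigned long util_imbalance(unsigned long group_capacity,
                                    unsigned long group_util)
{
        unsigned long base = group_capacity > group_util ? group_capacity
                                                         : group_util;
        /* max(capacity, util) - util: spare capacity, or 0 if over-utilized */
        return base - group_util;
}

int main(void)
{
        printf("%lu\n", util_imbalance(1024, 700));  /* 324 units of spare capacity */
        printf("%lu\n", util_imbalance(1024, 1200)); /* 0: local group itself is full */
        return 0;
}
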
10266 static struct sched_group *find_busiest_group(struct lb_env *env) in find_busiest_group() argument
10277 update_sd_lb_stats(env, &sds); in find_busiest_group()
10290 struct root_domain *rd = env->dst_rq->rd; in find_busiest_group()
10293 trace_android_rvh_find_busiest_group(sds.busiest, env->dst_rq, in find_busiest_group()
10348 env->sd->imbalance_pct * local->avg_load) in find_busiest_group()
10358 if (env->idle == CPU_NOT_IDLE) in find_busiest_group()
10388 calculate_imbalance(env, &sds); in find_busiest_group()
10389 return env->imbalance ? sds.busiest : NULL; in find_busiest_group()
10392 env->imbalance = 0; in find_busiest_group()
10399 static struct rq *find_busiest_queue(struct lb_env *env, in find_busiest_queue() argument
10407 trace_android_rvh_find_busiest_queue(env->dst_cpu, group, env->cpus, in find_busiest_queue()
10412 for_each_cpu_and(i, sched_group_span(group), env->cpus) { in find_busiest_queue()
10439 if (rt > env->fbq_type) in find_busiest_queue()
10454 if (env->sd->flags & SD_ASYM_CPUCAPACITY && in find_busiest_queue()
10455 !capacity_greater(capacity_of(env->dst_cpu), capacity) && in find_busiest_queue()
10460 if ((env->sd->flags & SD_ASYM_PACKING) && in find_busiest_queue()
10461 sched_asym_prefer(i, env->dst_cpu) && in find_busiest_queue()
10465 switch (env->migration_type) { in find_busiest_queue()
10473 if (nr_running == 1 && load > env->imbalance && in find_busiest_queue()
10474 !check_cpu_capacity(rq, env->sd)) in find_busiest_queue()
10546 asym_active_balance(struct lb_env *env) in asym_active_balance() argument
10553 return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) && in asym_active_balance()
10554 sched_asym_prefer(env->dst_cpu, env->src_cpu); in asym_active_balance()
10558 imbalanced_active_balance(struct lb_env *env) in imbalanced_active_balance() argument
10560 struct sched_domain *sd = env->sd; in imbalanced_active_balance()
10567 if ((env->migration_type == migrate_task) && in imbalanced_active_balance()
10574 static int need_active_balance(struct lb_env *env) in need_active_balance() argument
10576 struct sched_domain *sd = env->sd; in need_active_balance()
10578 if (asym_active_balance(env)) in need_active_balance()
10581 if (imbalanced_active_balance(env)) in need_active_balance()
10590 if ((env->idle != CPU_NOT_IDLE) && in need_active_balance()
10591 (env->src_rq->cfs.h_nr_running == 1)) { in need_active_balance()
10592 if ((check_cpu_capacity(env->src_rq, sd)) && in need_active_balance()
10593 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) in need_active_balance()
10597 if (env->migration_type == migrate_misfit) in need_active_balance()
10605 static int should_we_balance(struct lb_env *env) in should_we_balance() argument
10607 struct sched_group *sg = env->sd->groups; in should_we_balance()
10614 if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) in should_we_balance()
10624 if (env->idle == CPU_NEWLY_IDLE) { in should_we_balance()
10625 if (env->dst_rq->nr_running > 0 || env->dst_rq->ttwu_pending) in should_we_balance()
10631 for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) { in should_we_balance()
10636 return cpu == env->dst_cpu; in should_we_balance()
10640 return group_balance_cpu(sg) == env->dst_cpu; in should_we_balance()
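
should_we_balance() elects a single CPU per group to run the balancer: the first idle CPU in the group's balance mask, or failing that the designated group balance CPU. A user-space sketch of that election, with cpumasks reduced to plain arrays (the newly-idle early-out at the top of the function is left out):

#include <stdbool.h>
#include <stdio.h>

static bool should_we_balance(int dst_cpu, const int *balance_cpus, int n,
                              const bool *cpu_is_idle, int group_balance_cpu)
{
        for (int i = 0; i < n; i++) {
                if (!cpu_is_idle[balance_cpus[i]])
                        continue;
                return balance_cpus[i] == dst_cpu;      /* first idle CPU wins */
        }
        return group_balance_cpu == dst_cpu;            /* fallback: designated CPU */
}

int main(void)
{
        int cpus[] = { 0, 1, 2, 3 };
        bool idle[] = { false, true, false, true };

        printf("%d\n", should_we_balance(1, cpus, 4, idle, 0)); /* 1: CPU1 is first idle */
        printf("%d\n", should_we_balance(3, cpus, 4, idle, 0)); /* 0: CPU1 got there first */
        return 0;
}
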
10657 struct lb_env env = { in load_balance() local
10666 .tasks = LIST_HEAD_INIT(env.tasks), in load_balance()
10674 if (!should_we_balance(&env)) { in load_balance()
10679 group = find_busiest_group(&env); in load_balance()
10685 busiest = find_busiest_queue(&env, group); in load_balance()
10691 WARN_ON_ONCE(busiest == env.dst_rq); in load_balance()
10693 schedstat_add(sd->lb_imbalance[idle], env.imbalance); in load_balance()
10695 env.src_cpu = busiest->cpu; in load_balance()
10696 env.src_rq = busiest; in load_balance()
10700 env.flags |= LBF_ALL_PINNED; in load_balance()
10708 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); in load_balance()
10712 env.src_rq_rf = &rf; in load_balance()
10719 cur_ld_moved = detach_tasks(&env); in load_balance()
10732 attach_tasks(&env); in load_balance()
10738 if (env.flags & LBF_NEED_BREAK) { in load_balance()
10739 env.flags &= ~LBF_NEED_BREAK; in load_balance()
10741 if (env.loop < busiest->nr_running) in load_balance()
10764 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { in load_balance()
10767 __cpumask_clear_cpu(env.dst_cpu, env.cpus); in load_balance()
10769 env.dst_rq = cpu_rq(env.new_dst_cpu); in load_balance()
10770 env.dst_cpu = env.new_dst_cpu; in load_balance()
10771 env.flags &= ~LBF_DST_PINNED; in load_balance()
10772 env.loop = 0; in load_balance()
10773 env.loop_break = SCHED_NR_MIGRATE_BREAK; in load_balance()
10788 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) in load_balance()
10793 if (unlikely(env.flags & LBF_ALL_PINNED)) { in load_balance()
10803 if (!cpumask_subset(cpus, env.dst_grpmask)) { in load_balance()
10804 env.loop = 0; in load_balance()
10805 env.loop_break = SCHED_NR_MIGRATE_BREAK; in load_balance()
10823 if (need_active_balance(&env)) { in load_balance()
10839 env.flags &= ~LBF_ALL_PINNED; in load_balance()
10865 if (likely(!active_balance) || need_active_balance(&env)) { in load_balance()
10878 if (sd_parent && !(env.flags & LBF_ALL_PINNED)) { in load_balance()
10904 if (env.idle == CPU_NEWLY_IDLE) in load_balance()
10908 if ((env.flags & LBF_ALL_PINNED && in load_balance()
11002 struct lb_env env = { in active_load_balance_cpu_stop() local
11016 p = detach_one_task(&env); in active_load_balance_cpu_stop()