Lines Matching full:env

1668 static void update_numa_stats(struct task_numa_env *env, struct numa_stats *ns, int nid, bool find_…  in update_numa_stats()  argument
1687 if (READ_ONCE(rq->numa_migrate_on) || !cpumask_test_cpu(cpu, env->p->cpus_ptr)) { in update_numa_stats()
1702 ns->node_type = numa_classify(env->imbalance_pct, ns); in update_numa_stats()
1709 static void task_numa_assign(struct task_numa_env *env, struct task_struct *p, long imp) in task_numa_assign() argument
1711 struct rq *rq = cpu_rq(env->dst_cpu); in task_numa_assign()
1714 if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) { in task_numa_assign()
1716 int start = env->dst_cpu; in task_numa_assign()
1719 for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start) in task_numa_assign()
1721 … if (cpu == env->best_cpu || !idle_cpu(cpu) || !cpumask_test_cpu(cpu, env->p->cpus_ptr)) { in task_numa_assign()
1725 env->dst_cpu = cpu; in task_numa_assign()
1726 rq = cpu_rq(env->dst_cpu); in task_numa_assign()
1741 if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) { in task_numa_assign()
1742 rq = cpu_rq(env->best_cpu); in task_numa_assign()
1746 if (env->best_task) { in task_numa_assign()
1747 put_task_struct(env->best_task); in task_numa_assign()
1753 env->best_task = p; in task_numa_assign()
1754 env->best_imp = imp; in task_numa_assign()
1755 env->best_cpu = env->dst_cpu; in task_numa_assign()
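
A note on the matches above: task_numa_assign() first claims the destination runqueue with an atomic exchange on rq->numa_migrate_on and only falls back to scanning the node for another idle, allowed CPU when that claim fails; it also drops the reference on any previously stored env->best_task before recording the new candidate. A minimal user-space model of the claim step (the type and helper names here are stand-ins, not the kernel's):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct model_rq {
        atomic_int numa_migrate_on;     /* models rq->numa_migrate_on */
    };

    /* Mirrors the xchg(&rq->numa_migrate_on, 1) claim: returns true only
     * for the single caller that flips the flag from 0 to 1. */
    static bool claim_dst_rq(struct model_rq *rq)
    {
        return atomic_exchange(&rq->numa_migrate_on, 1) == 0;
    }
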
1758 static bool load_too_imbalanced(long src_load, long dst_load, struct task_numa_env *env) in load_too_imbalanced() argument
1771 src_capacity = env->src_stats.compute_capacity; in load_too_imbalanced()
1772 dst_capacity = env->dst_stats.compute_capacity; in load_too_imbalanced()
1776 orig_src_load = env->src_stats.load; in load_too_imbalanced()
1777 orig_dst_load = env->dst_stats.load; in load_too_imbalanced()
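
The load_too_imbalanced() matches compare source and destination load after scaling by each node's compute capacity. A standalone sketch of that comparison, assuming the upstream cross-multiplied form (the indexed tree may differ in detail):

    #include <stdbool.h>
    #include <stdlib.h>

    /* Sketch: would moving `load` leave src/dst more imbalanced, once each
     * side's load is weighted by the other's compute capacity?  The move is
     * rejected when the absolute imbalance grows versus the original loads. */
    static bool load_too_imbalanced_model(long src_load, long dst_load,
                                          long orig_src_load, long orig_dst_load,
                                          long src_capacity, long dst_capacity)
    {
        long imb = labs(dst_load * src_capacity - src_load * dst_capacity);
        long old_imb = labs(orig_dst_load * src_capacity -
                            orig_src_load * dst_capacity);

        return imb > old_imb;
    }
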
1798 static bool task_numa_compare(struct task_numa_env *env, long taskimp, long groupimp, bool maymove) in task_numa_compare() argument
1800 struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p); in task_numa_compare()
1801 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
1805 int dist = env->dist; in task_numa_compare()
1822 * end try selecting ourselves (current == env->p) as a swap candidate. in task_numa_compare()
1824 if (cur == env->p) { in task_numa_compare()
1830 if (maymove && moveimp >= env->best_imp) { in task_numa_compare()
1838 if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr)) { in task_numa_compare()
1846 if (env->best_task && env->best_task->numa_preferred_nid == env->src_nid && in task_numa_compare()
1847 cur->numa_preferred_nid != env->src_nid) { in task_numa_compare()
1863 imp = taskimp + task_weight(cur, env->src_nid, dist) - task_weight(cur, env->dst_nid, dist); in task_numa_compare()
1877 imp += group_weight(cur, env->src_nid, dist) - group_weight(cur, env->dst_nid, dist); in task_numa_compare()
1879 imp += task_weight(cur, env->src_nid, dist) - task_weight(cur, env->dst_nid, dist); in task_numa_compare()
1884 if (cur->numa_preferred_nid == env->dst_nid) { in task_numa_compare()
1894 if (cur->numa_preferred_nid == env->src_nid) { in task_numa_compare()
1898 if (maymove && moveimp > imp && moveimp > env->best_imp) { in task_numa_compare()
1908 if (env->best_task && cur->numa_preferred_nid == env->src_nid && in task_numa_compare()
1909 env->best_task->numa_preferred_nid != env->src_nid) { in task_numa_compare()
1919 if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 0x2) { in task_numa_compare()
1926 load = task_h_load(env->p) - task_h_load(cur); in task_numa_compare()
1931 dst_load = env->dst_stats.load + load; in task_numa_compare()
1932 src_load = env->src_stats.load - load; in task_numa_compare()
1934 if (load_too_imbalanced(src_load, dst_load, env)) { in task_numa_compare()
1941 int cpu = env->dst_stats.idle_cpu; in task_numa_compare()
1945 cpu = env->dst_cpu; in task_numa_compare()
1952 if (!idle_cpu(cpu) && env->best_cpu >= 0 && idle_cpu(env->best_cpu)) { in task_numa_compare()
1953 cpu = env->best_cpu; in task_numa_compare()
1956 env->dst_cpu = cpu; in task_numa_compare()
1959 task_numa_assign(env, cur, imp); in task_numa_compare()
1966 if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu)) { in task_numa_compare()
1974 if (!maymove && env->best_task && env->best_task->numa_preferred_nid == env->src_nid) { in task_numa_compare()
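
The task_numa_compare() matches build an improvement score for swapping env->p with the task currently on dst_cpu, and only replace the stored best candidate when the new score clears a small hysteresis margin. An illustrative model with stand-in values (MODEL_SMALLIMP is not the kernel constant):

    #include <stdbool.h>

    #define MODEL_SMALLIMP 30       /* stand-in for the kernel's SMALLIMP */

    /* Moving p is worth taskimp; dragging the current task back to the
     * source node adds its own locality delta (weight at src minus weight
     * at dst), matching the imp arithmetic above. */
    static long swap_improvement(long taskimp, long cur_weight_src,
                                 long cur_weight_dst)
    {
        return taskimp + cur_weight_src - cur_weight_dst;
    }

    /* A candidate only displaces the current best when it clears both
     * margins, i.e. the inverse of the
     * `imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 0x2` rejection. */
    static bool beats_best(long imp, long best_imp)
    {
        return imp >= MODEL_SMALLIMP && imp > best_imp + MODEL_SMALLIMP / 2;
    }
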
1983 static void task_numa_find_cpu(struct task_numa_env *env, long taskimp, long groupimp) in task_numa_find_cpu() argument
1992 if (env->dst_stats.node_type == node_has_spare) { in task_numa_find_cpu()
2002 src_running = env->src_stats.nr_running - 1; in task_numa_find_cpu()
2003 dst_running = env->dst_stats.nr_running + 1; in task_numa_find_cpu()
2009 if (env->dst_stats.idle_cpu >= 0) { in task_numa_find_cpu()
2010 env->dst_cpu = env->dst_stats.idle_cpu; in task_numa_find_cpu()
2011 task_numa_assign(env, NULL, 0); in task_numa_find_cpu()
2018 * If the improvement from just moving env->p direction is better in task_numa_find_cpu()
2021 load = task_h_load(env->p); in task_numa_find_cpu()
2022 dst_load = env->dst_stats.load + load; in task_numa_find_cpu()
2023 src_load = env->src_stats.load - load; in task_numa_find_cpu()
2024 maymove = !load_too_imbalanced(src_load, dst_load, env); in task_numa_find_cpu()
2027 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) in task_numa_find_cpu()
2030 if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) { in task_numa_find_cpu()
2034 env->dst_cpu = cpu; in task_numa_find_cpu()
2035 if (task_numa_compare(env, taskimp, groupimp, maymove)) { in task_numa_find_cpu()
2043 struct task_numa_env env = { in task_numa_migrate() local
2071 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); in task_numa_migrate()
2073 env.imbalance_pct = FAIR_ONEHUNDRED + (sd->imbalance_pct - FAIR_ONEHUNDRED) / 0x2; in task_numa_migrate()
2088 env.dst_nid = p->numa_preferred_nid; in task_numa_migrate()
2089 dist = env.dist = node_distance(env.src_nid, env.dst_nid); in task_numa_migrate()
2090 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
2091 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
2092 update_numa_stats(&env, &env.src_stats, env.src_nid, false); in task_numa_migrate()
2093 taskimp = task_weight(p, env.dst_nid, dist) - taskweight; in task_numa_migrate()
2094 groupimp = group_weight(p, env.dst_nid, dist) - groupweight; in task_numa_migrate()
2095 update_numa_stats(&env, &env.dst_stats, env.dst_nid, true); in task_numa_migrate()
2098 task_numa_find_cpu(&env, taskimp, groupimp); in task_numa_migrate()
2108 if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) { in task_numa_migrate()
2111 if (nid == env.src_nid || nid == p->numa_preferred_nid) { in task_numa_migrate()
2115 dist = node_distance(env.src_nid, env.dst_nid); in task_numa_migrate()
2116 if (sched_numa_topology_type == NUMA_BACKPLANE && dist != env.dist) { in task_numa_migrate()
2117 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
2118 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
2128 env.dist = dist; in task_numa_migrate()
2129 env.dst_nid = nid; in task_numa_migrate()
2130 update_numa_stats(&env, &env.dst_stats, env.dst_nid, true); in task_numa_migrate()
2131 task_numa_find_cpu(&env, taskimp, groupimp); in task_numa_migrate()
2144 if (env.best_cpu == -1) { in task_numa_migrate()
2145 nid = env.src_nid; in task_numa_migrate()
2147 nid = cpu_to_node(env.best_cpu); in task_numa_migrate()
2156 if (env.best_cpu == -1) { in task_numa_migrate()
2157 trace_sched_stick_numa(p, env.src_cpu, NULL, -1); in task_numa_migrate()
2161 best_rq = cpu_rq(env.best_cpu); in task_numa_migrate()
2162 if (env.best_task == NULL) { in task_numa_migrate()
2163 ret = migrate_task_to(p, env.best_cpu); in task_numa_migrate()
2166 trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu); in task_numa_migrate()
2171 ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu); in task_numa_migrate()
2175 trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu); in task_numa_migrate()
2177 put_task_struct(env.best_task); in task_numa_migrate()
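
task_numa_migrate() halves the domain's imbalance margin before searching, as the env.imbalance_pct assignment above shows. A worked example, taking FAIR_ONEHUNDRED as 100:

    /* With a domain imbalance_pct of 125 (25% slack for regular load
     * balancing), NUMA migration uses 100 + (125 - 100) / 2 = 112, i.e.
     * roughly half the slack before a node counts as overloaded. */
    static int numa_imbalance_pct(int sd_imbalance_pct)
    {
        return 100 + (sd_imbalance_pct - 100) / 2;
    }
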
7886 static int task_hot(struct task_struct *p, struct lb_env *env) in task_hot() argument
7890 lockdep_assert_held(&env->src_rq->lock); in task_hot()
7901 if (env->sd->flags & SD_SHARE_CPUCAPACITY) { in task_hot()
7908 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running && in task_hot()
7920 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
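
task_hot() treats a task as cache-hot when it ran on the source runqueue within the migration-cost window; a minimal model of the delta test on the last match above (upstream defaults the window to 0.5 ms via sysctl_sched_migration_cost, taken here only as an input):

    /* Hot if the task executed on src_rq within the last migration_cost_ns
     * nanoseconds; the real check also honours SD_SHARE_CPUCAPACITY and
     * the CACHE_HOT_BUDDY feature seen above. */
    static int task_hot_model(unsigned long long rq_clock_task_ns,
                              unsigned long long exec_start_ns,
                              unsigned long long migration_cost_ns)
    {
        long long delta = (long long)(rq_clock_task_ns - exec_start_ns);

        return delta < (long long)migration_cost_ns;
    }
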
7931 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) in migrate_degrades_locality() argument
7941 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) { in migrate_degrades_locality()
7945 src_nid = cpu_to_node(env->src_cpu); in migrate_degrades_locality()
7946 dst_nid = cpu_to_node(env->dst_cpu); in migrate_degrades_locality()
7953 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) { in migrate_degrades_locality()
7966 if (env->idle == CPU_IDLE) { in migrate_degrades_locality()
7983 static inline int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) in migrate_degrades_locality() argument
7992 static int can_migrate_task(struct task_struct *p, struct lb_env *env) in can_migrate_task() argument
7996 lockdep_assert_held(&env->src_rq->lock); in can_migrate_task()
8005 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) { in can_migrate_task()
8014 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { in can_migrate_task()
8019 env->flags |= LBF_SOME_PINNED; in can_migrate_task()
8029 if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED)) { in can_migrate_task()
8033 /* Prevent to re-select dst_cpu via env's CPUs: */ in can_migrate_task()
8034 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) in can_migrate_task()
8037 env->flags |= LBF_DST_PINNED; in can_migrate_task()
8038 env->new_dst_cpu = cpu; in can_migrate_task()
8047 env->flags &= ~LBF_ALL_PINNED; in can_migrate_task()
8050 …if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS && !preferred_cluster(cpu_rq(env->dst_cpu)->cl… in can_migrate_task()
8055 if (task_running(env->src_rq, p)) { in can_migrate_task()
8066 tsk_cache_hot = migrate_degrades_locality(p, env); in can_migrate_task()
8068 tsk_cache_hot = task_hot(p, env); in can_migrate_task()
8071 if (tsk_cache_hot <= 0 || env->sd->nr_balance_failed > env->sd->cache_nice_tries) { in can_migrate_task()
8073 schedstat_inc(env->sd->lb_hot_gained[env->idle]); in can_migrate_task()
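
Reading the can_migrate_task() matches top to bottom gives the pull-eligibility order: throttled pairs and CPU affinity first (remembering an alternate destination via LBF_DST_PINNED/new_dst_cpu when another CPU in the group would do), then currently-running tasks, then cache hotness weighed against the domain's failure count. A condensed model; the enum and flags are stand-ins, not kernel symbols:

    #include <stdbool.h>

    enum mig_verdict { MIG_OK, MIG_PINNED, MIG_RUNNING, MIG_HOT };

    static enum mig_verdict can_migrate_model(bool throttled_pair,
                                              bool allowed_on_dst,
                                              bool running_on_src,
                                              int cache_hot,
                                              unsigned int nr_balance_failed,
                                              unsigned int cache_nice_tries)
    {
        if (throttled_pair || !allowed_on_dst)
            return MIG_PINNED;
        if (running_on_src)
            return MIG_RUNNING;
        /* Hot tasks are still pulled once the domain has failed to
         * balance more than cache_nice_tries times. */
        if (cache_hot > 0 && nr_balance_failed <= cache_nice_tries)
            return MIG_HOT;
        return MIG_OK;
    }
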
8084 * detach_task() -- detach the task for the migration specified in env
8086 static void detach_task(struct task_struct *p, struct lb_env *env) in detach_task() argument
8088 lockdep_assert_held(&env->src_rq->lock); in detach_task()
8090 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); in detach_task()
8092 double_lock_balance(env->src_rq, env->dst_rq); in detach_task()
8093 if (!(env->src_rq->clock_update_flags & RQCF_UPDATED)) { in detach_task()
8094 update_rq_clock(env->src_rq); in detach_task()
8097 set_task_cpu(p, env->dst_cpu); in detach_task()
8099 double_unlock_balance(env->src_rq, env->dst_rq); in detach_task()
8104 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
8109 static struct task_struct *detach_one_task(struct lb_env *env) in detach_one_task() argument
8113 lockdep_assert_held(&env->src_rq->lock); in detach_one_task()
8115 list_for_each_entry_reverse(p, &env->src_rq->cfs_tasks, se.group_node) in detach_one_task()
8117 if (!can_migrate_task(p, env)) { in detach_one_task()
8121 detach_task(p, env); in detach_one_task()
8125 * lb_gained[env->idle] is updated (other is detach_tasks) in detach_one_task()
8129 schedstat_inc(env->sd->lb_gained[env->idle]); in detach_one_task()
8143 static int detach_tasks(struct lb_env *env) in detach_tasks() argument
8145 struct list_head *tasks = &env->src_rq->cfs_tasks; in detach_tasks()
8150 int orig_loop = env->loop; in detach_tasks()
8153 lockdep_assert_held(&env->src_rq->lock); in detach_tasks()
8155 if (env->imbalance <= 0) { in detach_tasks()
8160 if (!same_cluster(env->dst_cpu, env->src_cpu)) { in detach_tasks()
8161 env->flags |= LBF_IGNORE_PREFERRED_CLUSTER_TASKS; in detach_tasks()
8171 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) { in detach_tasks()
8177 env->loop++; in detach_tasks()
8179 if (env->loop > env->loop_max) { in detach_tasks()
8184 if (env->loop > env->loop_break) { in detach_tasks()
8185 env->loop_break += sched_nr_migrate_break; in detach_tasks()
8186 env->flags |= LBF_NEED_BREAK; in detach_tasks()
8190 if (!can_migrate_task(p, env)) { in detach_tasks()
8194 switch (env->migration_type) { in detach_tasks()
8199 * value. Make sure that env->imbalance decreases in detach_tasks()
8205 if (sched_feat(LB_MIN) && load < 0x10 && !env->sd->nr_balance_failed) { in detach_tasks()
8215 if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance) { in detach_tasks()
8219 env->imbalance -= load; in detach_tasks()
8224 if (util > env->imbalance) { in detach_tasks()
8228 env->imbalance -= util; in detach_tasks()
8232 env->imbalance--; in detach_tasks()
8237 if (task_fits_capacity(p, capacity_of(env->src_cpu))) { in detach_tasks()
8241 env->imbalance = 0; in detach_tasks()
8245 detach_task(p, env); in detach_tasks()
8246 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
8256 if (env->idle == CPU_NEWLY_IDLE) { in detach_tasks()
8265 if (env->imbalance <= 0) { in detach_tasks()
8275 if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS && !detached) { in detach_tasks()
8276 tasks = &env->src_rq->cfs_tasks; in detach_tasks()
8277 env->flags &= ~LBF_IGNORE_PREFERRED_CLUSTER_TASKS; in detach_tasks()
8278 env->loop = orig_loop; in detach_tasks()
8288 schedstat_add(env->sd->lb_gained[env->idle], detached); in detach_tasks()
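
detach_tasks() walks src_rq->cfs_tasks under a loop_max/loop_break budget and debits env->imbalance differently per migration_type, as the switch above shows. A standalone model of that accounting; task_load/task_util stand in for the kernel's per-task load and utilization helpers, and the misfit case (which zeroes the imbalance once a non-fitting task is found) is omitted:

    #include <stdbool.h>

    enum model_mig_type { MT_LOAD, MT_UTIL, MT_TASK };

    /* Returns true if the task should be detached, debiting *imbalance;
     * false means "skip this task and look at the next one". */
    static bool account_detached(enum model_mig_type type, long task_load,
                                 long task_util, long *imbalance)
    {
        switch (type) {
        case MT_LOAD:
            if (task_load > *imbalance)
                return false;
            *imbalance -= task_load;
            return true;
        case MT_UTIL:
            if (task_util > *imbalance)
                return false;
            *imbalance -= task_util;
            return true;
        case MT_TASK:
            (*imbalance)--;
            return true;
        }
        return false;
    }
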
8323 static void attach_tasks(struct lb_env *env) in attach_tasks() argument
8325 struct list_head *tasks = &env->tasks; in attach_tasks()
8329 rq_lock(env->dst_rq, &rf); in attach_tasks()
8330 update_rq_clock(env->dst_rq); in attach_tasks()
8336 attach_task(env->dst_rq, p); in attach_tasks()
8339 rq_unlock(env->dst_rq, &rf); in attach_tasks()
8932 * @env: The load balancing environment.
8937 static inline void update_sg_lb_stats(struct lb_env *env, struct sched_group *group, struct sg_lb_s… in update_sg_lb_stats() argument
8944 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group)); in update_sg_lb_stats()
8946 for_each_cpu_and(i, sched_group_span(group), env->cpus) in update_sg_lb_stats()
8954 if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false)) { in update_sg_lb_stats()
8955 env->flags |= LBF_NOHZ_AGAIN; in update_sg_lb_stats()
8992 … if (env->sd->flags & SD_ASYM_CPUCAPACITY && sgs->group_misfit_task_load < rq->misfit_task_load) { in update_sg_lb_stats()
9008 if (env->sd->flags & SD_ASYM_PACKING && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running && in update_sg_lb_stats()
9009 sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu)) { in update_sg_lb_stats()
9017 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs); in update_sg_lb_stats()
9027 * @env: The load balancing environment.
9038 static bool update_sd_pick_busiest(struct lb_env *env, struct sd_lb_stats *sds, struct sched_group … in update_sd_pick_busiest() argument
9143 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) && (sgs->group_type <= group_fully_busy) && in update_sd_pick_busiest()
9508 * @env: The load balancing environment.
9512 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) in update_sd_lb_stats() argument
9514 struct sched_domain *child = env->sd->child; in update_sd_lb_stats()
9515 struct sched_group *sg = env->sd->groups; in update_sd_lb_stats()
9521 if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked)) { in update_sd_lb_stats()
9522 env->flags |= LBF_NOHZ_STATS; in update_sd_lb_stats()
9530 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg)); in update_sd_lb_stats()
9535 if (env->idle != CPU_NEWLY_IDLE || time_after_eq(jiffies, sg->sgc->next_update)) { in update_sd_lb_stats()
9536 update_group_capacity(env->sd, env->dst_cpu); in update_sd_lb_stats()
9540 update_sg_lb_stats(env, sg, sgs, &sg_status); in update_sd_lb_stats()
9546 if (update_sd_pick_busiest(env, sds, sg, sgs)) { in update_sd_lb_stats()
9557 } while (sg != env->sd->groups); in update_sd_lb_stats()
9563 …if ((env->flags & LBF_NOHZ_AGAIN) && cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd… in update_sd_lb_stats()
9568 if (env->sd->flags & SD_NUMA) { in update_sd_lb_stats()
9569 env->fbq_type = fbq_classify_group(&sds->busiest_stat); in update_sd_lb_stats()
9572 if (!env->sd->parent) { in update_sd_lb_stats()
9573 struct root_domain *rd = env->dst_rq->rd; in update_sd_lb_stats()
9582 struct root_domain *rd = env->dst_rq->rd; in update_sd_lb_stats()
9608 * @env: load balance environment
9611 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) in calculate_imbalance() argument
9620 env->migration_type = migrate_misfit; in calculate_imbalance()
9621 env->imbalance = 1; in calculate_imbalance()
9630 env->migration_type = migrate_task; in calculate_imbalance()
9631 env->imbalance = busiest->sum_h_nr_running; in calculate_imbalance()
9642 env->migration_type = migrate_task; in calculate_imbalance()
9643 env->imbalance = 1; in calculate_imbalance()
9652 … if ((busiest->group_type > group_fully_busy) && !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) { in calculate_imbalance()
9661 env->migration_type = migrate_util; in calculate_imbalance()
9662 env->imbalance = max(local->group_capacity, local->group_util) - local->group_util; in calculate_imbalance()
9671 if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) { in calculate_imbalance()
9672 env->migration_type = migrate_task; in calculate_imbalance()
9673 env->imbalance = 1; in calculate_imbalance()
9685 env->migration_type = migrate_task; in calculate_imbalance()
9687 env->imbalance = nr_diff >> 1; in calculate_imbalance()
9693 env->migration_type = migrate_task; in calculate_imbalance()
9694 env->imbalance = max_t(long, 0, (local->idle_cpus - busiest->idle_cpus) >> 1); in calculate_imbalance()
9698 if (env->sd->flags & SD_NUMA) { in calculate_imbalance()
9699 env->imbalance = adjust_numa_imbalance(env->imbalance, busiest->sum_nr_running); in calculate_imbalance()
9723 env->imbalance = 0; in calculate_imbalance()
9736 env->migration_type = migrate_load; in calculate_imbalance()
9737 env->imbalance = min((busiest->avg_load - sds->avg_load) * busiest->group_capacity, in calculate_imbalance()
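
calculate_imbalance() picks both a migration_type and an amount; the matches above show the main arithmetic. Illustrative forms, using plain longs in place of the kernel's scaled fixed-point values:

    /* Spare-capacity case (migrate_util): pull enough utilization to fill
     * the local group's headroom, i.e. max(capacity, util) - util. */
    static long util_imbalance(long local_capacity, long local_util)
    {
        long base = local_capacity > local_util ? local_capacity : local_util;

        return base - local_util;
    }

    /* Task-count cases (migrate_task): move half the difference in running
     * tasks (or idle CPUs), so the two sides converge rather than
     * ping-pong a single task back and forth. */
    static long task_count_imbalance(long busiest_nr, long local_nr)
    {
        long diff = busiest_nr - local_nr;

        return diff > 0 ? diff >> 1 : 0;
    }
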
9771 * @env: The load balancing environment.
9775 static struct sched_group *find_busiest_group(struct lb_env *env) in find_busiest_group() argument
9786 update_sd_lb_stats(env, &sds); in find_busiest_group()
9789 struct root_domain *rd = env->dst_rq->rd; in find_busiest_group()
9859 if (FAIR_ONEHUNDRED * busiest->avg_load <= env->sd->imbalance_pct * local->avg_load) { in find_busiest_group()
9871 if (env->idle == CPU_NOT_IDLE) { in find_busiest_group()
9903 calculate_imbalance(env, &sds); in find_busiest_group()
9904 return env->imbalance ? sds.busiest : NULL; in find_busiest_group()
9907 env->imbalance = 0; in find_busiest_group()
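
The avg_load comparison in find_busiest_group() (taking FAIR_ONEHUNDRED as 100) only treats a group as worth pulling from when it exceeds the local group by the domain's imbalance_pct margin; for example, with imbalance_pct = 117 the busiest group must carry more than 17% extra average load. A one-line model:

    #include <stdbool.h>

    static bool busiest_worth_pulling(unsigned long busiest_avg_load,
                                      unsigned long local_avg_load,
                                      unsigned int imbalance_pct)
    {
        return 100UL * busiest_avg_load > imbalance_pct * local_avg_load;
    }
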
9914 static struct rq *find_busiest_queue(struct lb_env *env, struct sched_group *group) in find_busiest_queue() argument
9921 for_each_cpu_and(i, sched_group_span(group), env->cpus) in find_busiest_queue()
9948 if (rt > env->fbq_type) { in find_busiest_queue()
9965 …if (env->sd->flags & SD_ASYM_CPUCAPACITY && capacity_of(env->dst_cpu) < capacity && nr_running == … in find_busiest_queue()
9969 switch (env->migration_type) { in find_busiest_queue()
9976 if (nr_running == 1 && load > env->imbalance && !check_cpu_capacity(rq, env->sd)) { in find_busiest_queue()
10048 static inline bool asym_active_balance(struct lb_env *env) in asym_active_balance() argument
10055 return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) && in asym_active_balance()
10056 sched_asym_prefer(env->dst_cpu, env->src_cpu); in asym_active_balance()
10059 static inline bool voluntary_active_balance(struct lb_env *env) in voluntary_active_balance() argument
10061 struct sched_domain *sd = env->sd; in voluntary_active_balance()
10063 if (asym_active_balance(env)) { in voluntary_active_balance()
10073 if ((env->idle != CPU_NOT_IDLE) && (env->src_rq->cfs.h_nr_running == 1)) { in voluntary_active_balance()
10074 if ((check_cpu_capacity(env->src_rq, sd)) && in voluntary_active_balance()
10075 … (capacity_of(env->src_cpu) * sd->imbalance_pct < capacity_of(env->dst_cpu) * FAIR_ONEHUNDRED)) { in voluntary_active_balance()
10080 if (env->migration_type == migrate_misfit) { in voluntary_active_balance()
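
The capacity test in voluntary_active_balance() above (again taking FAIR_ONEHUNDRED as 100) actively pushes a lone running task off a CPU whose capacity, even inflated by the domain's imbalance_pct, still falls short of the destination's. As a model:

    #include <stdbool.h>

    static bool worth_active_push(unsigned long src_capacity,
                                  unsigned long dst_capacity,
                                  unsigned int imbalance_pct)
    {
        return src_capacity * imbalance_pct < dst_capacity * 100UL;
    }
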
10087 static int need_active_balance(struct lb_env *env) in need_active_balance() argument
10089 struct sched_domain *sd = env->sd; in need_active_balance()
10091 if (voluntary_active_balance(env)) { in need_active_balance()
10111 static int should_we_balance(struct lb_env *env) in should_we_balance() argument
10113 struct sched_group *sg = env->sd->groups; in should_we_balance()
10120 if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) { in should_we_balance()
10128 if (env->idle == CPU_NEWLY_IDLE) { in should_we_balance()
10133 for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) in should_we_balance()
10140 return cpu == env->dst_cpu; in should_we_balance()
10144 return group_balance_cpu_not_isolated(sg) == env->dst_cpu; in should_we_balance()
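
should_we_balance() keeps the balance pass to one CPU per group: a newly-idle CPU always proceeds, otherwise only the first idle CPU in the group's balance mask (falling back to the designated group balance CPU) continues. A condensed model; the array arguments stand in for the kernel's cpumask walk:

    #include <stdbool.h>

    static bool should_we_balance_model(bool newly_idle, int this_cpu,
                                        const int *idle_cpus, int n_idle,
                                        int group_balance_cpu)
    {
        if (newly_idle)
            return true;
        if (n_idle > 0)
            return idle_cpus[0] == this_cpu;    /* first idle CPU wins */
        return group_balance_cpu == this_cpu;
    }
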
10161 struct lb_env env = { in load_balance() local
10170 .tasks = LIST_HEAD_INIT(env.tasks), in load_balance()
10178 if (!should_we_balance(&env)) { in load_balance()
10183 group = find_busiest_group(&env); in load_balance()
10189 busiest = find_busiest_queue(&env, group); in load_balance()
10195 BUG_ON(busiest == env.dst_rq); in load_balance()
10197 schedstat_add(sd->lb_imbalance[idle], env.imbalance); in load_balance()
10199 env.src_cpu = busiest->cpu; in load_balance()
10200 env.src_rq = busiest; in load_balance()
10210 env.flags |= LBF_ALL_PINNED; in load_balance()
10211 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); in load_balance()
10221 cur_ld_moved = detach_tasks(&env); in load_balance()
10234 attach_tasks(&env); in load_balance()
10240 if (env.flags & LBF_NEED_BREAK) { in load_balance()
10241 env.flags &= ~LBF_NEED_BREAK; in load_balance()
10264 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { in load_balance()
10265 /* Prevent to re-select dst_cpu via env's CPUs */ in load_balance()
10266 __cpumask_clear_cpu(env.dst_cpu, env.cpus); in load_balance()
10268 env.dst_rq = cpu_rq(env.new_dst_cpu); in load_balance()
10269 env.dst_cpu = env.new_dst_cpu; in load_balance()
10270 env.flags &= ~LBF_DST_PINNED; in load_balance()
10271 env.loop = 0; in load_balance()
10272 env.loop_break = sched_nr_migrate_break; in load_balance()
10287 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) { in load_balance()
10293 if (unlikely(env.flags & LBF_ALL_PINNED)) { in load_balance()
10303 if (!cpumask_subset(cpus, env.dst_grpmask)) { in load_balance()
10304 env.loop = 0; in load_balance()
10305 env.loop_break = sched_nr_migrate_break; in load_balance()
10324 if (need_active_balance(&env)) { in load_balance()
10336 env.flags |= LBF_ALL_PINNED; in load_balance()
10364 if (likely(!active_balance) || voluntary_active_balance(&env)) { in load_balance()
10387 if (sd_parent && !(env.flags & LBF_ALL_PINNED)) { in load_balance()
10414 if (env.idle == CPU_NEWLY_IDLE) { in load_balance()
10419 if ((env.flags & LBF_ALL_PINNED && sd->balance_interval < MAX_PINNED_INTERVAL) || in load_balance()
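
load_balance() caps how many tasks one attempt may scan: env.loop_max is the smaller of sysctl_sched_nr_migrate and the busiest runqueue's nr_running, and the src_rq lock is dropped every loop_break iterations via LBF_NEED_BREAK (both default to 32 upstream; the values in the indexed tree may differ). A sketch of the cap:

    static unsigned int pick_loop_max(unsigned int sched_nr_migrate,
                                      unsigned int busiest_nr_running)
    {
        return sched_nr_migrate < busiest_nr_running ?
               sched_nr_migrate : busiest_nr_running;
    }
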
10516 struct lb_env env = { in active_load_balance_cpu_stop() local
10529 detach_task(push_task, &env); in active_load_balance_cpu_stop()
10546 struct lb_env env = { in active_load_balance_cpu_stop() local
10565 p = detach_one_task(&env); in active_load_balance_cpu_stop()
10684 * env->dst_cpu, so we can't know our idle in rebalance_domains()