
Lines Matching refs:sd (kernel/sched/fair.c)

2177 struct sched_domain *sd; in task_numa_migrate() local
2192 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); in task_numa_migrate()
2193 if (sd) { in task_numa_migrate()
2194 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; in task_numa_migrate()
2195 env.imb_numa_nr = sd->imb_numa_nr; in task_numa_migrate()
2205 if (unlikely(!sd)) { in task_numa_migrate()
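
The task_numa_migrate() matches above (2192-2195) show the NUMA path copying its imbalance tolerance from the surrounding domain, but keeping only half of the slack above 100%. A minimal userspace sketch of that arithmetic, with an illustrative one-field stand-in for struct sched_domain:

#include <stdio.h>

/* Illustrative stand-in for the one struct sched_domain field used here. */
struct sched_domain_stub { int imbalance_pct; };

/* Halve the slack above 100%, as the assignment at 2194 does. */
static int numa_imbalance_pct(const struct sched_domain_stub *sd)
{
        return 100 + (sd->imbalance_pct - 100) / 2;
}

int main(void)
{
        struct sched_domain_stub sd = { .imbalance_pct = 125 };

        /* 125% at the domain level becomes 112% for the NUMA migration check. */
        printf("%d\n", numa_imbalance_pct(&sd));
        return 0;
}

wake_affine_weight() at 6495 below applies the same halved percentage when weighing the previous CPU's load against the waking CPU's.
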
6468 wake_affine_weight(struct sched_domain *sd, struct task_struct *p, in wake_affine_weight() argument
6495 prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2; in wake_affine_weight()
6510 static int wake_affine(struct sched_domain *sd, struct task_struct *p, in wake_affine() argument
6519 target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); in wake_affine()
6525 schedstat_inc(sd->ttwu_move_affine); in wake_affine()
6531 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
6593 static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, in find_idlest_cpu() argument
6598 if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) in find_idlest_cpu()
6608 while (sd) { in find_idlest_cpu()
6613 if (!(sd->flags & sd_flag)) { in find_idlest_cpu()
6614 sd = sd->child; in find_idlest_cpu()
6618 group = find_idlest_group(sd, p, cpu); in find_idlest_cpu()
6620 sd = sd->child; in find_idlest_cpu()
6627 sd = sd->child; in find_idlest_cpu()
6633 weight = sd->span_weight; in find_idlest_cpu()
6634 sd = NULL; in find_idlest_cpu()
6639 sd = tmp; in find_idlest_cpu()
6786 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int … in select_idle_cpu() argument
6796 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); in select_idle_cpu()
6821 span_avg = sd->span_weight * avg_idle; in select_idle_cpu()
6880 select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_capacity() argument
6888 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); in select_idle_capacity()
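
Both select_idle_cpu() (6796) and select_idle_capacity() (6888) begin by intersecting the domain span with the task's affinity mask, so only CPUs that are in the domain and allowed for the task are scanned. A trivial illustration with plain bitmasks standing in for cpumasks:

#include <stdio.h>

int main(void)
{
        /* Plain bitmasks standing in for cpumasks (bit i set = CPU i). */
        unsigned long sd_span  = 0x0fUL;          /* domain spans CPUs 0-3 */
        unsigned long cpus_ptr = 0x0aUL;          /* task allowed on CPUs 1 and 3 */
        unsigned long to_scan  = sd_span & cpus_ptr;

        printf("scan mask = %#lx\n", to_scan);    /* only CPUs 1 and 3 are scanned */
        return 0;
}
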
6948 struct sched_domain *sd; in select_idle_sibling() local
7013 sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target)); in select_idle_sibling()
7022 if (sd) { in select_idle_sibling()
7023 i = select_idle_capacity(p, sd, target); in select_idle_sibling()
7028 sd = rcu_dereference(per_cpu(sd_llc, target)); in select_idle_sibling()
7029 if (!sd) in select_idle_sibling()
7042 i = select_idle_cpu(p, sd, has_idle_core, target); in select_idle_sibling()
7307 struct sched_domain *sd; in find_energy_efficient_cpu() local
7335 sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity)); in find_energy_efficient_cpu()
7336 while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) in find_energy_efficient_cpu()
7337 sd = sd->parent; in find_energy_efficient_cpu()
7338 if (!sd) in find_energy_efficient_cpu()
7375 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) in find_energy_efficient_cpu()
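
In find_energy_efficient_cpu(), the loop at 7336-7337 starts from the lowest asymmetric-capacity level and climbs toward the root until the domain also spans prev_cpu, so both candidate CPUs fall inside the span checked at 7375. A hedged sketch of that walk, with bitmasks standing in for cpumasks and a hand-built two-level hierarchy:

#include <stdio.h>

/* Minimal stand-in for the hierarchy: each level spans a CPU bitmask
 * (bit i set = CPU i) and points at a wider parent level. */
struct sd_level {
        unsigned long span;
        struct sd_level *parent;
};

/* Same shape as the walk at 7336-7337: climb toward the root until the
 * current level also contains prev_cpu. */
static struct sd_level *span_covering(struct sd_level *sd, int prev_cpu)
{
        while (sd && !(sd->span & (1UL << prev_cpu)))
                sd = sd->parent;
        return sd;
}

int main(void)
{
        struct sd_level pkg = { .span = 0xffUL, .parent = NULL }; /* CPUs 0-7 */
        struct sd_level mc  = { .span = 0x0fUL, .parent = &pkg }; /* CPUs 0-3 */

        struct sd_level *sd = span_covering(&mc, 6);  /* prev_cpu = 6 */
        printf("span = %#lx\n", sd ? sd->span : 0UL);  /* climbs to the 0-7 level */
        return 0;
}
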
7512 struct sched_domain *tmp, *sd = NULL; in select_task_rq_fair() local
7556 sd = NULL; /* Prefer wake_affine over balance flags */ in select_task_rq_fair()
7566 sd = tmp; in select_task_rq_fair()
7571 if (unlikely(sd)) { in select_task_rq_fair()
7573 new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag); in select_task_rq_fair()
8259 struct sched_domain *sd; member
8302 if (env->sd->flags & SD_SHARE_CPUCAPACITY) in task_hot()
8346 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) in migrate_degrades_locality()
8476 env->sd->nr_balance_failed > env->sd->cache_nice_tries) { in can_migrate_task()
8478 schedstat_inc(env->sd->lb_hot_gained[env->idle]); in can_migrate_task()
8536 schedstat_inc(env->sd->lb_gained[env->idle]); in detach_one_task()
8610 load < 16 && !env->sd->nr_balance_failed) in detach_tasks()
8619 if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance) in detach_tasks()
8679 schedstat_add(env->sd->lb_gained[env->idle], detached); in detach_tasks()
9029 static void update_cpu_capacity(struct sched_domain *sd, int cpu) in update_cpu_capacity() argument
9032 struct sched_group *sdg = sd->groups; in update_cpu_capacity()
9048 void update_group_capacity(struct sched_domain *sd, int cpu) in update_group_capacity() argument
9050 struct sched_domain *child = sd->child; in update_group_capacity()
9051 struct sched_group *group, *sdg = sd->groups; in update_group_capacity()
9055 interval = msecs_to_jiffies(sd->balance_interval); in update_group_capacity()
9060 update_cpu_capacity(sd, cpu); in update_group_capacity()
9109 check_cpu_capacity(struct rq *rq, struct sched_domain *sd) in check_cpu_capacity() argument
9111 return ((rq->cpu_capacity * sd->imbalance_pct) < in check_cpu_capacity()
9120 static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd) in check_misfit_status() argument
9124 check_cpu_capacity(rq, sd)); in check_misfit_status()
9334 sched_reduced_capacity(struct rq *rq, struct sched_domain *sd) in sched_reduced_capacity() argument
9343 return check_cpu_capacity(rq, sd); in sched_reduced_capacity()
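
check_cpu_capacity() at 9109-9111 (truncated above) reports a CPU as capacity-pressured when its remaining capacity, inflated by the domain's imbalance_pct, still falls short of the CPU's original capacity; sched_reduced_capacity() and check_misfit_status() both reduce to this test. A standalone restatement of the predicate, assuming the usual second operand rq->cpu_capacity_orig * 100 and using stub structs in place of struct rq and struct sched_domain:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stub fields; the kernel keeps them on struct rq / struct sched_domain. */
struct rq_stub { unsigned long cpu_capacity, cpu_capacity_orig; };
struct sd_stub { int imbalance_pct; };

/* The predicate: remaining capacity, inflated by imbalance_pct, still falls
 * short of the original capacity. */
static bool capacity_reduced(const struct rq_stub *rq, const struct sd_stub *sd)
{
        return rq->cpu_capacity * sd->imbalance_pct <
               rq->cpu_capacity_orig * 100;
}

int main(void)
{
        struct rq_stub rq = { .cpu_capacity = 800, .cpu_capacity_orig = 1024 };
        struct sd_stub sd = { .imbalance_pct = 117 };

        /* 800 * 117 = 93600 < 1024 * 100 = 102400: the CPU counts as pressured. */
        printf("%s\n", capacity_reduced(&rq, &sd) ? "reduced" : "ok");
        return 0;
}
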
9400 if (env->sd->flags & SD_ASYM_CPUCAPACITY) { in update_sg_lb_stats()
9407 sched_reduced_capacity(rq, env->sd)) { in update_sg_lb_stats()
9419 if (!local_group && env->sd->flags & SD_ASYM_PACKING && in update_sg_lb_stats()
9425 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs); in update_sg_lb_stats()
9463 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) && in update_sd_pick_busiest()
9547 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) && in update_sd_pick_busiest()
9639 static inline void update_sg_wakeup_stats(struct sched_domain *sd, in update_sg_wakeup_stats() argument
9649 if (sd->flags & SD_ASYM_CPUCAPACITY) in update_sg_wakeup_stats()
9672 if (sd->flags & SD_ASYM_CPUCAPACITY && in update_sg_wakeup_stats()
9683 sgs->group_type = group_classify(sd->imbalance_pct, group, sgs); in update_sg_wakeup_stats()
9753 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) in find_idlest_group() argument
9755 struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups; in find_idlest_group()
9786 update_sg_wakeup_stats(sd, group, sgs, p); in find_idlest_group()
9793 } while (group = group->next, group != sd->groups); in find_idlest_group()
9824 (sd->imbalance_pct-100) / 100; in find_idlest_group()
9835 if ((sd->flags & SD_NUMA) && in find_idlest_group()
9846 if (100 * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load) in find_idlest_group()
9863 if (sd->flags & SD_NUMA) { in find_idlest_group()
9864 int imb_numa_nr = sd->imb_numa_nr; in find_idlest_group()
9891 imb_numa_nr = min(cpumask_weight(cpus), sd->imb_numa_nr); in find_idlest_group()
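
In find_idlest_group(), the comparison at 9846 keeps the waking task local unless the local group's average load exceeds the idlest group's by more than the domain's imbalance_pct, with the whole test kept in integer arithmetic by cross-multiplying with 100. A hedged restatement:

#include <stdbool.h>
#include <stdio.h>

/*
 * Integer form of the comparison at 9846: migrating to the idlest group is
 * only worthwhile if the local group's load exceeds the idlest group's load
 * by more than the domain's imbalance_pct. (Names here are illustrative.)
 */
static bool prefer_idlest_group(unsigned long local_avg_load,
                                unsigned long idlest_avg_load,
                                int imbalance_pct)
{
        return 100 * local_avg_load >
               (unsigned long)imbalance_pct * idlest_avg_load;
}

int main(void)
{
        /* imbalance_pct = 117: local 1200 vs idlest 1000 -> 120000 > 117000,
         * so the idlest group wins; at local 1100 it would not. */
        printf("%d\n", prefer_idlest_group(1200, 1000, 117));
        return 0;
}
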
9935 if (env->sd->span_weight != llc_weight) in update_idle_cpu_scan()
9975 pct = env->sd->imbalance_pct; in update_idle_cpu_scan()
9996 struct sched_domain *child = env->sd->child; in update_sd_lb_stats()
9997 struct sched_group *sg = env->sd->groups; in update_sd_lb_stats()
10014 update_group_capacity(env->sd, env->dst_cpu); in update_sd_lb_stats()
10035 } while (sg != env->sd->groups); in update_sd_lb_stats()
10041 if (env->sd->flags & SD_NUMA) in update_sd_lb_stats()
10044 if (!env->sd->parent) { in update_sd_lb_stats()
10077 if (env->sd->flags & SD_ASYM_CPUCAPACITY) { in calculate_imbalance()
10120 !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) { in calculate_imbalance()
10170 if (env->sd->flags & SD_NUMA) { in calculate_imbalance()
10173 env->sd->imb_numa_nr); in calculate_imbalance()
10348 env->sd->imbalance_pct * local->avg_load) in find_busiest_group()
10454 if (env->sd->flags & SD_ASYM_CPUCAPACITY && in find_busiest_queue()
10460 if ((env->sd->flags & SD_ASYM_PACKING) && in find_busiest_queue()
10474 !check_cpu_capacity(rq, env->sd)) in find_busiest_queue()
10553 return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) && in asym_active_balance()
10560 struct sched_domain *sd = env->sd; in imbalanced_active_balance() local
10568 (sd->nr_balance_failed > sd->cache_nice_tries+2)) in imbalanced_active_balance()
10576 struct sched_domain *sd = env->sd; in need_active_balance() local
10592 if ((check_cpu_capacity(env->src_rq, sd)) && in need_active_balance()
10593 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) in need_active_balance()
10607 struct sched_group *sg = env->sd->groups; in should_we_balance()
10648 struct sched_domain *sd, enum cpu_idle_type idle, in load_balance() argument
10652 struct sched_domain *sd_parent = sd->parent; in load_balance()
10658 .sd = sd, in load_balance()
10661 .dst_grpmask = group_balance_mask(sd->groups), in load_balance()
10669 cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask); in load_balance()
10671 schedstat_inc(sd->lb_count[idle]); in load_balance()
10681 schedstat_inc(sd->lb_nobusyg[idle]); in load_balance()
10687 schedstat_inc(sd->lb_nobusyq[idle]); in load_balance()
10693 schedstat_add(sd->lb_imbalance[idle], env.imbalance); in load_balance()
10813 schedstat_inc(sd->lb_failed[idle]); in load_balance()
10821 sd->nr_balance_failed++; in load_balance()
10862 sd->nr_balance_failed = 0; in load_balance()
10867 sd->balance_interval = sd->min_interval; in load_balance()
10891 schedstat_inc(sd->lb_balanced[idle]); in load_balance()
10893 sd->nr_balance_failed = 0; in load_balance()
10909 sd->balance_interval < MAX_PINNED_INTERVAL) || in load_balance()
10910 sd->balance_interval < sd->max_interval) in load_balance()
10911 sd->balance_interval *= 2; in load_balance()
10917 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy) in get_sd_balance_interval() argument
10919 unsigned long interval = sd->balance_interval; in get_sd_balance_interval()
10922 interval *= sd->busy_factor; in get_sd_balance_interval()
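
load_balance() lengthens a domain's balancing period when attempts keep failing on pinned tasks (10909-10911) and resets it to min_interval on the normal path (10867), while get_sd_balance_interval() further multiplies the period by busy_factor for a busy CPU (10922). A simplified userspace sketch of those adjustments; the kernel's exact gating conditions, jiffies conversion, and clamping are elided:

#include <stdio.h>

/* Illustrative stand-in fields; the kernel keeps these on struct sched_domain. */
struct sd_interval_stub {
        unsigned long balance_interval; /* current period, in ms */
        unsigned long min_interval;     /* lower bound */
        unsigned long max_interval;     /* upper bound */
        unsigned int  busy_factor;      /* stretch factor while the CPU is busy */
};

/*
 * Direction of the two adjustments visible above, with the kernel's gating
 * conditions elided: a balance that proceeded normally resets the period to
 * min_interval (10867); a failed attempt on a pinned domain doubles it,
 * capped by max_interval (10909-10911).
 */
static void after_balance(struct sd_interval_stub *sd, int balanced_normally)
{
        if (balanced_normally)
                sd->balance_interval = sd->min_interval;
        else if (sd->balance_interval < sd->max_interval)
                sd->balance_interval *= 2;
}

/* Simplified get_sd_balance_interval(): busy CPUs rebalance less often (10922). */
static unsigned long next_interval_ms(const struct sd_interval_stub *sd, int cpu_busy)
{
        unsigned long interval = sd->balance_interval;

        if (cpu_busy)
                interval *= sd->busy_factor;
        return interval;
}

int main(void)
{
        struct sd_interval_stub sd = {
                .balance_interval = 8, .min_interval = 8,
                .max_interval = 32, .busy_factor = 16,
        };

        after_balance(&sd, 0);                        /* pinned failure: 8 -> 16 ms */
        printf("%lu ms\n", next_interval_ms(&sd, 1)); /* busy CPU: 16 * 16 = 256 ms */
        return 0;
}
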
10941 update_next_balance(struct sched_domain *sd, unsigned long *next_balance) in update_next_balance() argument
10946 interval = get_sd_balance_interval(sd, 0); in update_next_balance()
10947 next = sd->last_balance + interval; in update_next_balance()
10965 struct sched_domain *sd; in active_load_balance_cpu_stop() local
10996 for_each_domain(target_cpu, sd) { in active_load_balance_cpu_stop()
10997 if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) in active_load_balance_cpu_stop()
11001 if (likely(sd)) { in active_load_balance_cpu_stop()
11003 .sd = sd, in active_load_balance_cpu_stop()
11013 schedstat_inc(sd->alb_count); in active_load_balance_cpu_stop()
11018 schedstat_inc(sd->alb_pushed); in active_load_balance_cpu_stop()
11020 sd->nr_balance_failed = 0; in active_load_balance_cpu_stop()
11022 schedstat_inc(sd->alb_failed); in active_load_balance_cpu_stop()
11049 static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost) in update_newidle_cost() argument
11051 if (cost > sd->max_newidle_lb_cost) { in update_newidle_cost()
11056 sd->max_newidle_lb_cost = cost; in update_newidle_cost()
11057 sd->last_decay_max_lb_cost = jiffies; in update_newidle_cost()
11058 } else if (time_after(jiffies, sd->last_decay_max_lb_cost + HZ)) { in update_newidle_cost()
11064 sd->max_newidle_lb_cost = (sd->max_newidle_lb_cost * 253) / 256; in update_newidle_cost()
11065 sd->last_decay_max_lb_cost = jiffies; in update_newidle_cost()
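
update_newidle_cost() (11049-11065) tracks a per-domain peak of how long newidle balancing has taken: a larger observed cost raises the peak immediately, otherwise the peak decays by 253/256 (roughly 1%) at most once per second so stale spikes fade. newidle_balance() compares this peak against the CPU's expected idle time at 11723 and 11745. A hedged standalone sketch with the jiffies bookkeeping reduced to whole seconds:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; the jiffies/HZ bookkeeping is simplified to seconds. */
struct newidle_cost_stub {
        unsigned long long max_cost;   /* peak observed newidle balance cost */
        unsigned long last_decay_sec;  /* when the peak was last raised or decayed */
};

/* Same shape as update_newidle_cost() above: track the peak cost, and once a
 * second decay it by 253/256 (roughly 1%) so stale spikes fade out. */
static bool update_newidle_cost_stub(struct newidle_cost_stub *c,
                                     unsigned long long cost,
                                     unsigned long now_sec)
{
        if (cost > c->max_cost) {
                c->max_cost = cost;
                c->last_decay_sec = now_sec;
        } else if (now_sec > c->last_decay_sec) {
                c->max_cost = c->max_cost * 253 / 256;
                c->last_decay_sec = now_sec;
                return true;    /* a decay happened */
        }
        return false;
}

int main(void)
{
        struct newidle_cost_stub c = { .max_cost = 100000, .last_decay_sec = 0 };

        update_newidle_cost_stub(&c, 0, 1);   /* one second later, no new peak */
        printf("%llu\n", c.max_cost);         /* 100000 * 253 / 256 = 98828 */
        return 0;
}
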
11085 struct sched_domain *sd; in rebalance_domains() local
11097 for_each_domain(cpu, sd) { in rebalance_domains()
11102 need_decay = update_newidle_cost(sd, 0); in rebalance_domains()
11103 max_cost += sd->max_newidle_lb_cost; in rebalance_domains()
11116 interval = get_sd_balance_interval(sd, busy); in rebalance_domains()
11118 need_serialize = sd->flags & SD_SERIALIZE; in rebalance_domains()
11124 if (time_after_eq(jiffies, sd->last_balance + interval)) { in rebalance_domains()
11125 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { in rebalance_domains()
11134 sd->last_balance = jiffies; in rebalance_domains()
11135 interval = get_sd_balance_interval(sd, busy); in rebalance_domains()
11140 if (time_after(next_balance, sd->last_balance + interval)) { in rebalance_domains()
11141 next_balance = sd->last_balance + interval; in rebalance_domains()
11167 return unlikely(!rcu_dereference_sched(rq->sd)); in on_null_domain()
11247 struct sched_domain *sd; in nohz_balancer_kick() local
11286 sd = rcu_dereference(rq->sd); in nohz_balancer_kick()
11287 if (sd) { in nohz_balancer_kick()
11293 if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) { in nohz_balancer_kick()
11299 sd = rcu_dereference(per_cpu(sd_asym_packing, cpu)); in nohz_balancer_kick()
11300 if (sd) { in nohz_balancer_kick()
11306 for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) { in nohz_balancer_kick()
11314 sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu)); in nohz_balancer_kick()
11315 if (sd) { in nohz_balancer_kick()
11320 if (check_misfit_status(rq, sd)) { in nohz_balancer_kick()
11364 struct sched_domain *sd; in set_cpu_sd_state_busy() local
11367 sd = rcu_dereference(per_cpu(sd_llc, cpu)); in set_cpu_sd_state_busy()
11369 if (!sd || !sd->nohz_idle) in set_cpu_sd_state_busy()
11371 sd->nohz_idle = 0; in set_cpu_sd_state_busy()
11373 atomic_inc(&sd->shared->nr_busy_cpus); in set_cpu_sd_state_busy()
11394 struct sched_domain *sd; in set_cpu_sd_state_idle() local
11397 sd = rcu_dereference(per_cpu(sd_llc, cpu)); in set_cpu_sd_state_idle()
11399 if (!sd || sd->nohz_idle) in set_cpu_sd_state_idle()
11401 sd->nohz_idle = 1; in set_cpu_sd_state_idle()
11403 atomic_dec(&sd->shared->nr_busy_cpus); in set_cpu_sd_state_idle()
11682 struct sched_domain *sd; in newidle_balance() local
11720 sd = rcu_dereference_check_sched_domain(this_rq->sd); in newidle_balance()
11723 (sd && this_rq->avg_idle < sd->max_newidle_lb_cost)) { in newidle_balance()
11725 if (sd) in newidle_balance()
11726 update_next_balance(sd, &next_balance); in newidle_balance()
11739 for_each_domain(this_cpu, sd) { in newidle_balance()
11743 update_next_balance(sd, &next_balance); in newidle_balance()
11745 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) in newidle_balance()
11748 if (sd->flags & SD_BALANCE_NEWIDLE) { in newidle_balance()
11751 sd, CPU_NEWLY_IDLE, in newidle_balance()
11756 update_newidle_cost(sd, domain_cost); in newidle_balance()