Lines Matching refs:sd
28 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, in sched_domain_debug_one() argument
31 struct sched_group *group = sd->groups; in sched_domain_debug_one()
37 if (!(sd->flags & SD_LOAD_BALANCE)) { in sched_domain_debug_one()
39 if (sd->parent) in sched_domain_debug_one()
45 cpumask_pr_args(sched_domain_span(sd)), sd->name); in sched_domain_debug_one()
47 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { in sched_domain_debug_one()
68 if (!(sd->flags & SD_OVERLAP) && in sched_domain_debug_one()
81 if ((sd->flags & SD_OVERLAP) && in sched_domain_debug_one()
90 if (group == sd->groups && sd->child && in sched_domain_debug_one()
91 !cpumask_equal(sched_domain_span(sd->child), in sched_domain_debug_one()
100 if (group != sd->groups) in sched_domain_debug_one()
103 } while (group != sd->groups); in sched_domain_debug_one()
106 if (!cpumask_equal(sched_domain_span(sd), groupmask)) in sched_domain_debug_one()
109 if (sd->parent && in sched_domain_debug_one()
110 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) in sched_domain_debug_one()
115 static void sched_domain_debug(struct sched_domain *sd, int cpu) in sched_domain_debug() argument
122 if (!sd) { in sched_domain_debug()
130 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) in sched_domain_debug()
133 sd = sd->parent; in sched_domain_debug()
134 if (!sd) in sched_domain_debug()
141 # define sched_domain_debug(sd, cpu) do { } while (0) argument
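
The sched_domain_debug() hits above show the debug path walking one CPU's domain hierarchy: print a level via sched_domain_debug_one(), then climb to sd->parent until the chain runs out. A minimal user-space sketch of that walk, using a made-up toy_domain type rather than the kernel's struct sched_domain:

#include <stdio.h>

/* Simplified stand-in for struct sched_domain: only the fields the walk needs. */
struct toy_domain {
    const char *name;
    struct toy_domain *parent;
};

/* Mirrors the shape of sched_domain_debug(): print one level, then climb. */
static void toy_domain_debug(struct toy_domain *sd, int cpu)
{
    int level = 0;

    if (!sd) {
        printf("CPU%d: no sched domain attached\n", cpu);
        return;
    }
    for (;;) {
        printf("CPU%d level %d: %s\n", cpu, level, sd->name);
        level++;
        sd = sd->parent;
        if (!sd)
            break;
    }
}

int main(void)
{
    struct toy_domain numa = { "NUMA", NULL };
    struct toy_domain mc   = { "MC",   &numa };
    struct toy_domain smt  = { "SMT",  &mc   };

    toy_domain_debug(&smt, 0);
    return 0;
}
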
148 static int sd_degenerate(struct sched_domain *sd) in sd_degenerate() argument
150 if (cpumask_weight(sched_domain_span(sd)) == 1) in sd_degenerate()
154 if (sd->flags & (SD_LOAD_BALANCE | in sd_degenerate()
162 if (sd->groups != sd->groups->next) in sd_degenerate()
167 if (sd->flags & (SD_WAKE_AFFINE)) in sd_degenerate()
174 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) in sd_parent_degenerate() argument
176 unsigned long cflags = sd->flags, pflags = parent->flags; in sd_parent_degenerate()
181 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) in sd_parent_degenerate()
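
sd_degenerate() and sd_parent_degenerate() decide whether a domain level adds anything: a single-CPU span, or a single balancing group with no wake-affine behaviour, makes the level redundant, and a parent that covers exactly the same CPUs as its child can be dropped. A rough model of those checks, with a plain unsigned long standing in for the cpumask, made-up TOY_SD_* flags, __builtin_popcountl() standing in for cpumask_weight(), and the kernel's extra flag comparison omitted:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins; the real SD_* flags and cpumask types differ. */
#define TOY_SD_BALANCE     0x1
#define TOY_SD_WAKE_AFFINE 0x2

struct toy_domain {
    unsigned long span;      /* one bit per CPU */
    unsigned long flags;
    int nr_groups;
    struct toy_domain *parent;
};

/* A domain that spans one CPU, or balances over a single group and has no
 * wake-affine behaviour, contributes nothing and can be collapsed. */
static bool toy_degenerate(const struct toy_domain *sd)
{
    if (__builtin_popcountl(sd->span) == 1)
        return true;
    if ((sd->flags & TOY_SD_BALANCE) && sd->nr_groups > 1)
        return false;
    if (sd->flags & TOY_SD_WAKE_AFFINE)
        return false;
    return true;
}

/* A parent is redundant if it looks degenerate itself and covers exactly the
 * same CPUs as its child; keeping it would only add an extra level. */
static bool toy_parent_degenerate(const struct toy_domain *sd)
{
    const struct toy_domain *parent = sd->parent;

    if (!parent || !toy_degenerate(parent))
        return false;
    return sd->span == parent->span;
}

int main(void)
{
    struct toy_domain parent = { 0x3, TOY_SD_BALANCE, 1, NULL };
    struct toy_domain child  = { 0x3, TOY_SD_BALANCE | TOY_SD_WAKE_AFFINE, 2, &parent };

    printf("parent degenerate: %d\n", toy_parent_degenerate(&child));
    return 0;
}
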
581 static void destroy_sched_domain(struct sched_domain *sd) in destroy_sched_domain() argument
588 free_sched_groups(sd->groups, 1); in destroy_sched_domain()
590 if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) in destroy_sched_domain()
591 kfree(sd->shared); in destroy_sched_domain()
592 kfree(sd); in destroy_sched_domain()
597 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); in destroy_sched_domains_rcu() local
599 while (sd) { in destroy_sched_domains_rcu()
600 struct sched_domain *parent = sd->parent; in destroy_sched_domains_rcu()
601 destroy_sched_domain(sd); in destroy_sched_domains_rcu()
602 sd = parent; in destroy_sched_domains_rcu()
606 static void destroy_sched_domains(struct sched_domain *sd) in destroy_sched_domains() argument
608 if (sd) in destroy_sched_domains()
609 call_rcu(&sd->rcu, destroy_sched_domains_rcu); in destroy_sched_domains()
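
Teardown is deferred: destroy_sched_domains() hands the chain to call_rcu(), and the RCU callback walks sd->parent, destroying one level at a time. A simplified sketch of that chain walk with the RCU grace period left out (the callback body is just called directly) and placeholder fields for the per-level group and shared state:

#include <stdlib.h>

struct toy_domain {
    struct toy_domain *parent;
    void *groups;   /* placeholder for the group list freed per level */
    void *shared;   /* placeholder for the refcounted shared state */
};

/* Free one level; the kernel also drops group and shared refcounts here. */
static void toy_destroy_one(struct toy_domain *sd)
{
    free(sd->groups);
    free(sd->shared);
    free(sd);
}

/* Shape of the RCU callback: walk the parent chain, freeing each level.
 * The kernel only reaches this after a grace period via call_rcu(). */
static void toy_destroy_chain(struct toy_domain *sd)
{
    while (sd) {
        struct toy_domain *parent = sd->parent;

        toy_destroy_one(sd);
        sd = parent;
    }
}

int main(void)
{
    struct toy_domain *parent = calloc(1, sizeof(*parent));
    struct toy_domain *child  = calloc(1, sizeof(*child));

    child->parent = parent;
    toy_destroy_chain(child);   /* frees child, then parent */
    return 0;
}
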
633 struct sched_domain *sd; in update_top_cache_domain() local
637 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); in update_top_cache_domain()
638 if (sd) { in update_top_cache_domain()
639 id = cpumask_first(sched_domain_span(sd)); in update_top_cache_domain()
640 size = cpumask_weight(sched_domain_span(sd)); in update_top_cache_domain()
641 sds = sd->shared; in update_top_cache_domain()
644 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); in update_top_cache_domain()
649 sd = lowest_flag_domain(cpu, SD_NUMA); in update_top_cache_domain()
650 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); in update_top_cache_domain()
652 sd = highest_flag_domain(cpu, SD_ASYM_PACKING); in update_top_cache_domain()
653 rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd); in update_top_cache_domain()
655 sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY); in update_top_cache_domain()
656 rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd); in update_top_cache_domain()
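
update_top_cache_domain() caches per-CPU shortcuts (sd_llc, sd_numa, sd_asym_packing, sd_asym_cpucapacity) by scanning the hierarchy for the highest or lowest level carrying a given flag. A sketch of the highest_flag_domain()-style scan, assuming such flags are set contiguously from the bottom of the hierarchy; the TOY_* names are stand-ins, not kernel symbols:

#include <stdio.h>

#define TOY_SD_SHARE_LLC 0x1   /* stand-in for SD_SHARE_PKG_RESOURCES */

struct toy_domain {
    unsigned long flags;
    int weight;
    struct toy_domain *parent;
};

/* Walk upward and remember the last (highest) level still carrying the flag;
 * this is the shape of highest_flag_domain() used to locate the LLC domain. */
static struct toy_domain *toy_highest_flag_domain(struct toy_domain *sd,
                                                  unsigned long flag)
{
    struct toy_domain *hsd = NULL;

    for (; sd; sd = sd->parent) {
        if (!(sd->flags & flag))
            break;                /* such flags are set from the bottom up */
        hsd = sd;
    }
    return hsd;
}

int main(void)
{
    struct toy_domain die = { 0,                8, NULL };
    struct toy_domain mc  = { TOY_SD_SHARE_LLC, 4, &die };
    struct toy_domain smt = { TOY_SD_SHARE_LLC, 2, &mc  };
    struct toy_domain *llc = toy_highest_flag_domain(&smt, TOY_SD_SHARE_LLC);

    /* The kernel would now cache this domain, its first CPU and its weight
     * per CPU (sd_llc, sd_llc_id, sd_llc_size) under rcu_assign_pointer(). */
    printf("LLC spans %d CPUs\n", llc ? llc->weight : 0);
    return 0;
}
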
664 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) in cpu_attach_domain() argument
670 for (tmp = sd; tmp; ) { in cpu_attach_domain()
691 if (sd && sd_degenerate(sd)) { in cpu_attach_domain()
692 tmp = sd; in cpu_attach_domain()
693 sd = sd->parent; in cpu_attach_domain()
695 if (sd) in cpu_attach_domain()
696 sd->child = NULL; in cpu_attach_domain()
699 sched_domain_debug(sd, cpu); in cpu_attach_domain()
702 tmp = rq->sd; in cpu_attach_domain()
703 rcu_assign_pointer(rq->sd, sd); in cpu_attach_domain()
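
cpu_attach_domain() first splices out levels that sd_parent_degenerate() declares redundant, drops a degenerate bottom level, and only then publishes the chain to the runqueue with rcu_assign_pointer(rq->sd, sd). A simplified sketch of that collapse, with a crude degeneracy test and the kernel's flag transfer and destroy_sched_domain() call reduced to a comment:

#include <stdbool.h>
#include <stdio.h>

struct toy_domain {
    const char *name;
    int span_weight;
    int nr_groups;
    struct toy_domain *parent;
    struct toy_domain *child;
};

/* Very rough stand-in for sd_degenerate(): a single-CPU span or a single
 * balancing group means the level adds nothing. */
static bool toy_degenerate(const struct toy_domain *sd)
{
    return sd->span_weight == 1 || sd->nr_groups < 2;
}

/* Shape of the collapse in cpu_attach_domain(): unlink redundant parents,
 * then drop a degenerate bottom level, and return what should be attached. */
static struct toy_domain *toy_collapse(struct toy_domain *sd)
{
    struct toy_domain *tmp = sd;

    while (tmp) {
        struct toy_domain *parent = tmp->parent;

        if (!parent)
            break;
        if (toy_degenerate(parent)) {
            tmp->parent = parent->parent;   /* splice out the redundant level */
            if (parent->parent)
                parent->parent->child = tmp;
            /* the kernel also moves some flags down and frees 'parent' here */
        } else {
            tmp = parent;
        }
    }
    if (sd && toy_degenerate(sd)) {
        sd = sd->parent;                    /* drop the degenerate bottom */
        if (sd)
            sd->child = NULL;
    }
    return sd;
}

int main(void)
{
    struct toy_domain die = { "DIE", 8, 2, NULL, NULL };
    struct toy_domain mc  = { "MC",  8, 1, &die, NULL };   /* redundant level */
    struct toy_domain smt = { "SMT", 2, 2, &mc,  NULL };

    mc.child = &smt;
    die.child = &mc;
    printf("attach from: %s\n", toy_collapse(&smt)->name);  /* prints SMT */
    return 0;
}
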
711 struct sched_domain * __percpu *sd; member
843 build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask) in build_balance_mask() argument
846 struct sd_data *sdd = sd->private; in build_balance_mask()
853 sibling = *per_cpu_ptr(sdd->sd, i); in build_balance_mask()
880 build_group_from_child_sched_domain(struct sched_domain *sd, int cpu) in build_group_from_child_sched_domain() argument
892 if (sd->child) in build_group_from_child_sched_domain()
893 cpumask_copy(sg_span, sched_domain_span(sd->child)); in build_group_from_child_sched_domain()
895 cpumask_copy(sg_span, sched_domain_span(sd)); in build_group_from_child_sched_domain()
901 static void init_overlap_sched_group(struct sched_domain *sd, in init_overlap_sched_group() argument
905 struct sd_data *sdd = sd->private; in init_overlap_sched_group()
909 build_balance_mask(sd, sg, mask); in init_overlap_sched_group()
930 build_overlap_sched_groups(struct sched_domain *sd, int cpu) in build_overlap_sched_groups() argument
933 const struct cpumask *span = sched_domain_span(sd); in build_overlap_sched_groups()
935 struct sd_data *sdd = sd->private; in build_overlap_sched_groups()
947 sibling = *per_cpu_ptr(sdd->sd, i); in build_overlap_sched_groups()
969 init_overlap_sched_group(sd, sg); in build_overlap_sched_groups()
978 sd->groups = first; in build_overlap_sched_groups()
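
For overlapping (NUMA-style) levels, build_overlap_sched_groups() iterates the domain span starting at the local CPU, skips CPUs already covered by an earlier group, and derives each group from the sibling domain seen at that CPU. A toy version with bitmask spans and a fixed per-CPU sibling table; the real code builds struct sched_group objects and balance masks, not plain bitmasks:

#include <stdio.h>

#define TOY_NR_CPUS 4

/* Per-CPU "sibling" spans at this topology level; for overlapping NUMA
 * levels, different CPUs genuinely see different spans. */
static const unsigned long toy_sibling_span[TOY_NR_CPUS] = { 0x3, 0x3, 0xc, 0xc };

/* Shape of build_overlap_sched_groups(): walk the span, skip CPUs a previous
 * group already covered, and emit one group per distinct sibling span. */
static int toy_build_overlap_groups(unsigned long span, int this_cpu,
                                    unsigned long groups[], int max_groups)
{
    unsigned long covered = 0;
    int nr = 0;

    for (int off = 0; off < TOY_NR_CPUS; off++) {
        int i = (this_cpu + off) % TOY_NR_CPUS;  /* start at this CPU, like the kernel */

        if (!(span & (1UL << i)) || (covered & (1UL << i)))
            continue;
        if (nr == max_groups)
            break;
        groups[nr++] = toy_sibling_span[i];
        covered |= toy_sibling_span[i];
    }
    return nr;
}

int main(void)
{
    unsigned long groups[TOY_NR_CPUS];
    int nr = toy_build_overlap_groups(0xf, 2, groups, TOY_NR_CPUS);

    for (int g = 0; g < nr; g++)
        printf("group %d: 0x%lx\n", g, groups[g]);
    return 0;
}
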
1062 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in get_group() local
1063 struct sched_domain *child = sd->child; in get_group()
1105 build_sched_groups(struct sched_domain *sd, int cpu) in build_sched_groups() argument
1108 struct sd_data *sdd = sd->private; in build_sched_groups()
1109 const struct cpumask *span = sched_domain_span(sd); in build_sched_groups()
1135 sd->groups = first; in build_sched_groups()
1150 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) in init_sched_groups_capacity() argument
1152 struct sched_group *sg = sd->groups; in init_sched_groups_capacity()
1161 if (!(sd->flags & SD_ASYM_PACKING)) in init_sched_groups_capacity()
1174 } while (sg != sd->groups); in init_sched_groups_capacity()
1179 update_group_capacity(sd, cpu); in init_sched_groups_capacity()
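
init_sched_groups_capacity() walks the circular group list hanging off sd->groups, fills in per-group bookkeeping, and ends by calling update_group_capacity() for the local group. A compact sketch of that do/while traversal; the capacity formula here is a placeholder, not the kernel's:

#include <stdio.h>

struct toy_group {
    int weight;                 /* number of CPUs covered by the group */
    int capacity;
    struct toy_group *next;     /* circular list, like sd->groups in the kernel */
};

/* Shape of init_sched_groups_capacity(): visit every group in the ring once,
 * fill in per-group bookkeeping, then compute capacities. */
static void toy_init_groups_capacity(struct toy_group *groups, int per_cpu_capacity)
{
    struct toy_group *sg = groups;

    do {
        sg->capacity = sg->weight * per_cpu_capacity;  /* crude stand-in for
                                                          update_group_capacity() */
        sg = sg->next;
    } while (sg != groups);
}

int main(void)
{
    struct toy_group a = { 2, 0, NULL };
    struct toy_group b = { 2, 0, &a };

    a.next = &b;                 /* two groups of two CPUs each, in a ring */
    toy_init_groups_capacity(&a, 1024);
    printf("group capacities: %d %d\n", a.capacity, b.capacity);
    return 0;
}
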
1199 static void set_domain_attribute(struct sched_domain *sd, in set_domain_attribute() argument
1211 if (request < sd->level) { in set_domain_attribute()
1213 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); in set_domain_attribute()
1216 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); in set_domain_attribute()
1232 free_percpu(d->sd); in __free_domain_allocs()
1249 d->sd = alloc_percpu(struct sched_domain *); in __visit_domain_allocation_hell()
1250 if (!d->sd) in __visit_domain_allocation_hell()
1264 static void claim_allocations(int cpu, struct sched_domain *sd) in claim_allocations() argument
1266 struct sd_data *sdd = sd->private; in claim_allocations()
1268 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); in claim_allocations()
1269 *per_cpu_ptr(sdd->sd, cpu) = NULL; in claim_allocations()
1323 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in sd_init() local
1344 *sd = (struct sched_domain){ in sd_init()
1376 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); in sd_init()
1377 sd_id = cpumask_first(sched_domain_span(sd)); in sd_init()
1383 if (sd->flags & SD_ASYM_CPUCAPACITY) { in sd_init()
1384 struct sched_domain *t = sd; in sd_init()
1389 if (sd->child) in sd_init()
1390 sd->child->flags &= ~SD_PREFER_SIBLING; in sd_init()
1396 if (sd->flags & SD_SHARE_CPUCAPACITY) { in sd_init()
1397 sd->imbalance_pct = 110; in sd_init()
1399 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { in sd_init()
1400 sd->imbalance_pct = 117; in sd_init()
1401 sd->cache_nice_tries = 1; in sd_init()
1404 } else if (sd->flags & SD_NUMA) { in sd_init()
1405 sd->cache_nice_tries = 2; in sd_init()
1407 sd->flags &= ~SD_PREFER_SIBLING; in sd_init()
1408 sd->flags |= SD_SERIALIZE; in sd_init()
1410 sd->flags &= ~(SD_BALANCE_EXEC | in sd_init()
1417 sd->cache_nice_tries = 1; in sd_init()
1424 if (sd->flags & SD_SHARE_PKG_RESOURCES) { in sd_init()
1425 sd->shared = *per_cpu_ptr(sdd->sds, sd_id); in sd_init()
1426 atomic_inc(&sd->shared->ref); in sd_init()
1427 atomic_set(&sd->shared->nr_busy_cpus, sd_weight); in sd_init()
1430 sd->private = sdd; in sd_init()
1432 return sd; in sd_init()
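
sd_init() builds one level from the topology table and then tunes it by flag: SMT-like levels (SD_SHARE_CPUCAPACITY) get a tight imbalance_pct of 110, cache-sharing levels (SD_SHARE_PKG_RESOURCES) get 117 plus cache_nice_tries = 1, and NUMA levels get cache_nice_tries = 2, SD_SERIALIZE and fewer balance flags. A sketch of just that tuning table with hypothetical TOY_SD_* flags; the 110/117 values come from the fragments above, the default of 125 and the rest are approximate:

#include <stdio.h>

/* Hypothetical stand-ins for the topology flags sd_init() switches on. */
#define TOY_SD_SHARE_CPUCAPACITY 0x1   /* SMT siblings */
#define TOY_SD_SHARE_LLC         0x2   /* cores sharing a last-level cache */
#define TOY_SD_NUMA              0x4   /* NUMA-spanning level */

struct toy_tuning {
    int imbalance_pct;      /* how lopsided load must be before balancing */
    int cache_nice_tries;
};

/* Shape of the per-level tuning in sd_init(): tighter imbalance thresholds
 * where migration is cheap (SMT), looser ones as cache/NUMA costs grow. */
static struct toy_tuning toy_tune_level(unsigned long flags)
{
    struct toy_tuning t = { 125, 1 };          /* approximate defaults */

    if (flags & TOY_SD_SHARE_CPUCAPACITY) {
        t.imbalance_pct = 110;
        t.cache_nice_tries = 0;
    } else if (flags & TOY_SD_SHARE_LLC) {
        t.imbalance_pct = 117;
        t.cache_nice_tries = 1;
    } else if (flags & TOY_SD_NUMA) {
        t.cache_nice_tries = 2;                /* the kernel also sets
                                                  SD_SERIALIZE and drops
                                                  exec/fork balancing */
    }
    return t;
}

int main(void)
{
    struct toy_tuning smt = toy_tune_level(TOY_SD_SHARE_CPUCAPACITY);

    printf("SMT level: imbalance_pct=%d\n", smt.imbalance_pct);
    return 0;
}
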
1761 sdd->sd = alloc_percpu(struct sched_domain *); in __sdt_alloc()
1762 if (!sdd->sd) in __sdt_alloc()
1778 struct sched_domain *sd; in __sdt_alloc() local
1783 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), in __sdt_alloc()
1785 if (!sd) in __sdt_alloc()
1788 *per_cpu_ptr(sdd->sd, j) = sd; in __sdt_alloc()
1831 struct sched_domain *sd; in __sdt_free() local
1833 if (sdd->sd) { in __sdt_free()
1834 sd = *per_cpu_ptr(sdd->sd, j); in __sdt_free()
1835 if (sd && (sd->flags & SD_OVERLAP)) in __sdt_free()
1836 free_sched_groups(sd->groups, 0); in __sdt_free()
1837 kfree(*per_cpu_ptr(sdd->sd, j)); in __sdt_free()
1847 free_percpu(sdd->sd); in __sdt_free()
1848 sdd->sd = NULL; in __sdt_free()
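
__sdt_alloc() sets up, per topology level, a per-CPU array of domain pointers and one zeroed struct sched_domain per CPU (kzalloc_node() on that CPU's node); __sdt_free() walks the same slots and frees them. A user-space analogue with a plain array instead of percpu storage and calloc()/free() instead of the kernel allocators:

#include <stdlib.h>

#define TOY_NR_CPUS 4

struct toy_domain {
    int dummy;
};

/* Per-level "sd_data" shape: one domain pointer slot per CPU (a plain array
 * here; the kernel uses alloc_percpu() plus kzalloc_node() per CPU). */
struct toy_sd_data {
    struct toy_domain *sd[TOY_NR_CPUS];
};

static void toy_sdt_free(struct toy_sd_data *sdd)
{
    for (int j = 0; j < TOY_NR_CPUS; j++) {
        free(sdd->sd[j]);     /* kfree(*per_cpu_ptr(sdd->sd, j)) in the kernel */
        sdd->sd[j] = NULL;
    }
}

static int toy_sdt_alloc(struct toy_sd_data *sdd)
{
    for (int j = 0; j < TOY_NR_CPUS; j++) {
        sdd->sd[j] = calloc(1, sizeof(struct toy_domain));
        if (!sdd->sd[j]) {
            toy_sdt_free(sdd);   /* unwind on failure, as the kernel's error
                                    path does via __sdt_free() */
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    struct toy_sd_data sdd = { { NULL } };

    if (toy_sdt_alloc(&sdd) == 0)
        toy_sdt_free(&sdd);
    return 0;
}
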
1862 struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu); in build_sched_domain() local
1865 sd->level = child->level + 1; in build_sched_domain()
1866 sched_domain_level_max = max(sched_domain_level_max, sd->level); in build_sched_domain()
1867 child->parent = sd; in build_sched_domain()
1870 sched_domain_span(sd))) { in build_sched_domain()
1874 child->name, sd->name); in build_sched_domain()
1877 cpumask_or(sched_domain_span(sd), in build_sched_domain()
1878 sched_domain_span(sd), in build_sched_domain()
1883 set_domain_attribute(sd, attr); in build_sched_domain()
1885 return sd; in build_sched_domain()
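
build_sched_domain() stacks a freshly initialised level on top of the previous one: bump sd->level, link child->parent, and, if the child's span is somehow not contained in the parent's, warn and widen the parent with cpumask_or(). A bitmask sketch of that linking and repair step:

#include <stdio.h>

struct toy_domain {
    unsigned long span;          /* one bit per CPU */
    int level;
    struct toy_domain *parent;
    struct toy_domain *child;
};

/* Shape of build_sched_domain(): stack a new level on top of 'child', and if
 * the child covers CPUs the parent does not, widen the parent's span (the
 * kernel warns and does cpumask_or() in that case). */
static void toy_stack_level(struct toy_domain *sd, struct toy_domain *child)
{
    if (!child)
        return;
    sd->level = child->level + 1;
    child->parent = sd;
    sd->child = child;
    if (child->span & ~sd->span)
        sd->span |= child->span;  /* parent span must be a superset */
}

int main(void)
{
    struct toy_domain mc  = { 0x0f, 0, NULL, NULL };   /* CPUs 0-3 */
    struct toy_domain die = { 0x0c, 0, NULL, NULL };   /* broken: only CPUs 2-3 */

    toy_stack_level(&die, &mc);
    printf("repaired parent span: 0x%lx\n", die.span);  /* 0xf */
    return 0;
}
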
1955 struct sched_domain *sd; in build_sched_domains() local
1974 sd = NULL; in build_sched_domains()
1983 sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i); in build_sched_domains()
1986 *per_cpu_ptr(d.sd, i) = sd; in build_sched_domains()
1988 sd->flags |= SD_OVERLAP; in build_sched_domains()
1989 if (cpumask_equal(cpu_map, sched_domain_span(sd))) in build_sched_domains()
1996 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
1997 sd->span_weight = cpumask_weight(sched_domain_span(sd)); in build_sched_domains()
1998 if (sd->flags & SD_OVERLAP) { in build_sched_domains()
1999 if (build_overlap_sched_groups(sd, i)) in build_sched_domains()
2002 if (build_sched_groups(sd, i)) in build_sched_domains()
2013 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
2014 claim_allocations(i, sd); in build_sched_domains()
2015 init_sched_groups_capacity(i, sd); in build_sched_domains()
2022 sd = *per_cpu_ptr(d.sd, i); in build_sched_domains()
2023 cpu_attach_domain(sd, d.rd, i); in build_sched_domains()
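
build_sched_domains() as a whole is a series of passes over cpu_map: build the per-CPU domain chains bottom-up from the topology levels, then build the groups for every level, then set capacities and attach each chain to its runqueue. A skeleton of that pass structure with stubbed-out builders; only the loop shape mirrors the kernel:

#include <stdio.h>

#define TOY_NR_CPUS   4
#define TOY_NR_LEVELS 2

struct toy_domain {
    int level;
    struct toy_domain *parent;
};

static struct toy_domain pool[TOY_NR_CPUS][TOY_NR_LEVELS];

/* Stub for build_sched_domain()/sd_init(): chain one level onto the last. */
static struct toy_domain *toy_build_level(int cpu, int tl, struct toy_domain *child)
{
    struct toy_domain *sd = &pool[cpu][tl];

    sd->level = tl;
    sd->parent = NULL;
    if (child)
        child->parent = sd;
    return sd;
}

int main(void)
{
    struct toy_domain *bottom[TOY_NR_CPUS];

    /* Pass 1: per CPU, stack the topology levels bottom-up. */
    for (int i = 0; i < TOY_NR_CPUS; i++) {
        struct toy_domain *sd = NULL;

        for (int tl = 0; tl < TOY_NR_LEVELS; tl++)
            sd = toy_build_level(i, tl, sd);
        bottom[i] = &pool[i][0];   /* remember the lowest level, as d.sd does */
    }

    /* Pass 2 would build the groups per level (build_sched_groups() or
     * build_overlap_sched_groups()); pass 3 would set capacities and attach
     * each chain to its runqueue (cpu_attach_domain()).  Here we just walk. */
    for (int i = 0; i < TOY_NR_CPUS; i++) {
        int depth = 0;

        for (struct toy_domain *sd = bottom[i]; sd; sd = sd->parent)
            depth++;
        printf("CPU%d: %d domain levels\n", i, depth);
    }
    return 0;
}
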