Lines matching refs: cpu — one reference per line, prefixed with its source line number and suffixed with the containing function and whether cpu is an argument or a local there.
39 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, in sched_domain_debug_one() argument
52 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { in sched_domain_debug_one()
53 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); in sched_domain_debug_one()
55 if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) { in sched_domain_debug_one()
56 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); in sched_domain_debug_one()
135 static void sched_domain_debug(struct sched_domain *sd, int cpu) in sched_domain_debug() argument
143 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); in sched_domain_debug()
147 printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu); in sched_domain_debug()
150 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) in sched_domain_debug()
161 # define sched_domain_debug(sd, cpu) do { } while (0) argument
255 static struct perf_domain *find_pd(struct perf_domain *pd, int cpu) in find_pd() argument
258 if (cpumask_test_cpu(cpu, perf_domain_span(pd))) in find_pd()
266 static struct perf_domain *pd_init(int cpu) in pd_init() argument
268 struct em_perf_domain *obj = em_cpu_get(cpu); in pd_init()
273 pr_info("%s: no EM found for CPU%d\n", __func__, cpu); in pd_init()
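
The perf-domain lookup above only shows the membership test; the list walk around it is not part of the match. A minimal sketch of find_pd(), assuming perf domains are chained through a ->next pointer (that field is an assumption here, not shown in the listing):

static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
{
        while (pd) {
                /* Return the domain whose span covers this CPU. */
                if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
                        return pd;
                pd = pd->next;
        }

        return NULL;
}
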
354 int cpu = cpumask_first(cpu_map); in build_perf_domains() local
355 struct root_domain *rd = cpu_rq(cpu)->rd; in build_perf_domains()
366 if (!per_cpu(sd_asym_cpucapacity, cpu) && !eas_check) { in build_perf_domains()
455 if (cpumask_test_cpu(rq->cpu, old_rd->online)) in rq_attach_root()
458 cpumask_clear_cpu(rq->cpu, old_rd->span); in rq_attach_root()
472 cpumask_set_cpu(rq->cpu, rd->span); in rq_attach_root()
473 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) in rq_attach_root()
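
rq_attach_root() moves a runqueue from its old root domain to a new one; the four lines above are the mask bookkeeping on either side of that switch. A sketch of the surrounding flow (set_rq_offline()/set_rq_online() and the root-domain reference counting are assumed, the rest mirrors the listed lines):

        if (old_rd) {
                /* Take the CPU out of the old root domain first. */
                if (cpumask_test_cpu(rq->cpu, old_rd->online))
                        set_rq_offline(rq);
                cpumask_clear_cpu(rq->cpu, old_rd->span);
        }

        /* Then add it to the new root domain's span ... */
        cpumask_set_cpu(rq->cpu, rd->span);
        /* ... and mark it online only if it is actually active. */
        if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
                set_rq_online(rq);
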
632 static void update_top_cache_domain(int cpu) in update_top_cache_domain() argument
636 int id = cpu; in update_top_cache_domain()
639 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); in update_top_cache_domain()
646 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); in update_top_cache_domain()
647 per_cpu(sd_llc_size, cpu) = size; in update_top_cache_domain()
648 per_cpu(sd_llc_id, cpu) = id; in update_top_cache_domain()
649 rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds); in update_top_cache_domain()
651 sd = lowest_flag_domain(cpu, SD_NUMA); in update_top_cache_domain()
652 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); in update_top_cache_domain()
654 sd = highest_flag_domain(cpu, SD_ASYM_PACKING); in update_top_cache_domain()
655 rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd); in update_top_cache_domain()
657 sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY); in update_top_cache_domain()
658 rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd); in update_top_cache_domain()
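
update_top_cache_domain() caches per-CPU shortcut pointers into the domain hierarchy so hot paths do not have to walk rq->sd: the highest domain with SD_SHARE_PKG_RESOURCES becomes sd_llc (plus its size and id), and the NUMA/asym levels get their own pointers. A sketch of how the gaps between the listed lines plausibly fill in for the LLC part (sd->shared backing sd_llc_shared is an assumption):

        struct sched_domain_shared *sds = NULL;
        struct sched_domain *sd;
        int id = cpu, size = 1;

        sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
        if (sd) {
                /* Every CPU under the same LLC shares one id and size. */
                id = cpumask_first(sched_domain_span(sd));
                size = cpumask_weight(sched_domain_span(sd));
                sds = sd->shared;
        }

        rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
        per_cpu(sd_llc_size, cpu) = size;
        per_cpu(sd_llc_id, cpu) = id;
        rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
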
666 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) in cpu_attach_domain() argument
668 struct rq *rq = cpu_rq(cpu); in cpu_attach_domain()
701 sched_domain_debug(sd, cpu); in cpu_attach_domain()
706 dirty_sched_domain_sysctl(cpu); in cpu_attach_domain()
709 update_top_cache_domain(cpu); in cpu_attach_domain()
882 build_group_from_child_sched_domain(struct sched_domain *sd, int cpu) in build_group_from_child_sched_domain() argument
888 GFP_KERNEL, cpu_to_node(cpu)); in build_group_from_child_sched_domain()
909 int cpu; in init_overlap_sched_group() local
912 cpu = cpumask_first_and(sched_group_span(sg), mask); in init_overlap_sched_group()
914 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in init_overlap_sched_group()
932 build_overlap_sched_groups(struct sched_domain *sd, int cpu) in build_overlap_sched_groups() argument
943 for_each_cpu_wrap(i, span, cpu) { in build_overlap_sched_groups()
964 sg = build_group_from_child_sched_domain(sibling, cpu); in build_overlap_sched_groups()
1062 static struct sched_group *get_group(int cpu, struct sd_data *sdd) in get_group() argument
1064 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in get_group()
1070 cpu = cpumask_first(sched_domain_span(child)); in get_group()
1072 sg = *per_cpu_ptr(sdd->sg, cpu); in get_group()
1073 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in get_group()
1088 cpumask_set_cpu(cpu, sched_group_span(sg)); in get_group()
1089 cpumask_set_cpu(cpu, group_balance_mask(sg)); in get_group()
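
get_group() picks the representative CPU for a group: when the domain has a child level, the group is keyed by the first CPU of the child's span and covers that whole span; a leaf domain's group covers just the one CPU, which is what the two cpumask_set_cpu() calls above do. A sketch of that selection, with the reference counting and capacity setup omitted (sd->child as the lower level is assumed):

        struct sched_domain *child = sd->child;
        struct sched_group *sg;

        if (child)
                cpu = cpumask_first(sched_domain_span(child));

        sg = *per_cpu_ptr(sdd->sg, cpu);
        sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);

        if (child) {
                /* Non-leaf: the group spans the entire child domain. */
                cpumask_copy(sched_group_span(sg), sched_domain_span(child));
                cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
        } else {
                /* Leaf: the group is just this CPU. */
                cpumask_set_cpu(cpu, sched_group_span(sg));
                cpumask_set_cpu(cpu, group_balance_mask(sg));
        }
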
1107 build_sched_groups(struct sched_domain *sd, int cpu) in build_sched_groups() argument
1120 for_each_cpu_wrap(i, span, cpu) { in build_sched_groups()
1152 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) in init_sched_groups_capacity() argument
1159 int cpu, max_cpu = -1; in init_sched_groups_capacity() local
1166 for_each_cpu(cpu, sched_group_span(sg)) { in init_sched_groups_capacity()
1168 max_cpu = cpu; in init_sched_groups_capacity()
1169 else if (sched_asym_prefer(cpu, max_cpu)) in init_sched_groups_capacity()
1170 max_cpu = cpu; in init_sched_groups_capacity()
1178 if (cpu != group_balance_cpu(sg)) in init_sched_groups_capacity()
1181 update_group_capacity(sd, cpu); in init_sched_groups_capacity()
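
The loop above chooses the group's preferred CPU for SD_ASYM_PACKING by scanning the group span and keeping the CPU that sched_asym_prefer() ranks highest. A compact sketch of that scan; storing the result in sg->asym_prefer_cpu is an assumption here:

        int cpu, max_cpu = -1;

        for_each_cpu(cpu, sched_group_span(sg)) {
                if (max_cpu < 0)
                        max_cpu = cpu;
                else if (sched_asym_prefer(cpu, max_cpu))
                        max_cpu = cpu;
        }
        /* Assumed destination for the winner of the scan. */
        sg->asym_prefer_cpu = max_cpu;
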
1263 static void claim_allocations(int cpu, struct sched_domain *sd) in claim_allocations() argument
1267 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); in claim_allocations()
1268 *per_cpu_ptr(sdd->sd, cpu) = NULL; in claim_allocations()
1270 if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) in claim_allocations()
1271 *per_cpu_ptr(sdd->sds, cpu) = NULL; in claim_allocations()
1273 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) in claim_allocations()
1274 *per_cpu_ptr(sdd->sg, cpu) = NULL; in claim_allocations()
1276 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) in claim_allocations()
1277 *per_cpu_ptr(sdd->sgc, cpu) = NULL; in claim_allocations()
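
claim_allocations() transfers ownership of the per-CPU sd/sds/sg/sgc allocations to the live topology by NULL-ing the per-CPU slots that are now referenced, so the later bulk-free pass skips them. A reconstruction of the whole helper, assuming sdd comes from sd->private:

static void claim_allocations(int cpu, struct sched_domain *sd)
{
        struct sd_data *sdd = sd->private;

        WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
        *per_cpu_ptr(sdd->sd, cpu) = NULL;

        /* Only claim objects that picked up at least one reference. */
        if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
                *per_cpu_ptr(sdd->sds, cpu) = NULL;

        if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
                *per_cpu_ptr(sdd->sg, cpu) = NULL;

        if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
                *per_cpu_ptr(sdd->sgc, cpu) = NULL;
}
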
1317 struct sched_domain *child, int dflags, int cpu) in sd_init() argument
1320 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in sd_init()
1330 sd_weight = cpumask_weight(tl->mask(cpu)); in sd_init()
1372 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); in sd_init()
1452 static const struct cpumask *sd_numa_mask(int cpu) in sd_numa_mask() argument
1454 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; in sd_numa_mask()
1692 void sched_domains_numa_masks_set(unsigned int cpu) in sched_domains_numa_masks_set() argument
1694 int node = cpu_to_node(cpu); in sched_domains_numa_masks_set()
1700 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); in sched_domains_numa_masks_set()
1705 void sched_domains_numa_masks_clear(unsigned int cpu) in sched_domains_numa_masks_clear() argument
1711 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); in sched_domains_numa_masks_clear()
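
sched_domains_numa_masks_set()/..._clear() keep the per-level, per-node NUMA masks in sync across CPU hotplug: on hotplug-in a CPU is added to every mask whose node lies within that level's distance of the CPU's node, and on hotplug-out it is cleared from all of them. A sketch assuming the usual iteration bounds (sched_domains_numa_levels, nr_node_ids) and a per-level cutoff array sched_domains_numa_distance[]; those names are assumptions here:

void sched_domains_numa_masks_set(unsigned int cpu)
{
        int node = cpu_to_node(cpu);
        int i, j;

        for (i = 0; i < sched_domains_numa_levels; i++) {
                for (j = 0; j < nr_node_ids; j++) {
                        /* Node j is close enough to belong to this level's mask. */
                        if (node_distance(j, node) <= sched_domains_numa_distance[i])
                                cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
                }
        }
}
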
1723 int sched_numa_find_closest(const struct cpumask *cpus, int cpu) in sched_numa_find_closest() argument
1725 int i, j = cpu_to_node(cpu); in sched_numa_find_closest()
1728 cpu = cpumask_any_and(cpus, sched_domains_numa_masks[i][j]); in sched_numa_find_closest()
1729 if (cpu < nr_cpu_ids) in sched_numa_find_closest()
1730 return cpu; in sched_numa_find_closest()
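
sched_numa_find_closest() walks the NUMA levels from nearest to farthest and returns the first CPU in the candidate mask that falls inside this node's mask at that level; the listed lines are the body of that loop. A sketch of the whole helper, assuming sched_domains_numa_levels bounds the walk and nr_cpu_ids is the "not found" return:

int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
{
        int i, j = cpu_to_node(cpu);

        for (i = 0; i < sched_domains_numa_levels; i++) {
                /* First hit at the smallest distance wins. */
                cpu = cpumask_any_and(cpus, sched_domains_numa_masks[i][j]);
                if (cpu < nr_cpu_ids)
                        return cpu;
        }

        return nr_cpu_ids;
}
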
1844 struct sched_domain *child, int dflags, int cpu) in build_sched_domain() argument
1846 struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu); in build_sched_domain()
1877 const struct cpumask *cpu_map, int cpu) in topology_span_sane() argument
1892 if (i == cpu) in topology_span_sane()
1900 if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) && in topology_span_sane()
1901 cpumask_intersects(tl->mask(cpu), tl->mask(i))) in topology_span_sane()
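
topology_span_sane() enforces the invariant that makes group construction possible: at a given topology level, any two CPUs must report masks that are either identical or completely disjoint, since partially overlapping masks cannot be partitioned into sane groups (overlapping NUMA levels are presumably filtered out before this loop). A sketch of the check around the listed lines:

        int i;

        for_each_cpu(i, cpu_map) {
                if (i == cpu)
                        continue;
                /*
                 * Two CPUs at the same level must see either the exact
                 * same mask or two masks that do not intersect at all.
                 */
                if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) &&
                    cpumask_intersects(tl->mask(cpu), tl->mask(i)))
                        return false;
        }

        return true;
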
2153 unsigned int cpu = cpumask_any(cpu_map); in detach_destroy_domains() local
2156 if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu))) in detach_destroy_domains()