Lines Matching refs:cpu

38 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,  in sched_domain_debug_one()  argument
51 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { in sched_domain_debug_one()
52 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); in sched_domain_debug_one()
54 if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) { in sched_domain_debug_one()
55 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); in sched_domain_debug_one()
134 static void sched_domain_debug(struct sched_domain *sd, int cpu) in sched_domain_debug() argument
142 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); in sched_domain_debug()
146 printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu); in sched_domain_debug()
149 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) in sched_domain_debug()
160 # define sched_domain_debug(sd, cpu) do { } while (0) argument
279 static struct perf_domain *find_pd(struct perf_domain *pd, int cpu) in find_pd() argument
282 if (cpumask_test_cpu(cpu, perf_domain_span(pd))) in find_pd()
290 static struct perf_domain *pd_init(int cpu) in pd_init() argument
292 struct em_perf_domain *obj = em_cpu_get(cpu); in pd_init()
297 pr_info("%s: no EM found for CPU%d\n", __func__, cpu); in pd_init()
379 int cpu = cpumask_first(cpu_map); in build_perf_domains() local
380 struct root_domain *rd = cpu_rq(cpu)->rd; in build_perf_domains()
391 if (!per_cpu(sd_asym_cpucapacity, cpu) && !eas_check) { in build_perf_domains()
488 if (cpumask_test_cpu(rq->cpu, old_rd->online)) in rq_attach_root()
491 cpumask_clear_cpu(rq->cpu, old_rd->span); in rq_attach_root()
505 cpumask_set_cpu(rq->cpu, rd->span); in rq_attach_root()
506 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) in rq_attach_root()
666 static void update_top_cache_domain(int cpu) in update_top_cache_domain() argument
670 int id = cpu; in update_top_cache_domain()
673 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); in update_top_cache_domain()
680 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); in update_top_cache_domain()
681 per_cpu(sd_llc_size, cpu) = size; in update_top_cache_domain()
682 per_cpu(sd_llc_id, cpu) = id; in update_top_cache_domain()
683 rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds); in update_top_cache_domain()
685 sd = lowest_flag_domain(cpu, SD_NUMA); in update_top_cache_domain()
686 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); in update_top_cache_domain()
688 sd = highest_flag_domain(cpu, SD_ASYM_PACKING); in update_top_cache_domain()
689 rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd); in update_top_cache_domain()
691 sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL); in update_top_cache_domain()
692 rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd); in update_top_cache_domain()
700 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) in cpu_attach_domain() argument
702 struct rq *rq = cpu_rq(cpu); in cpu_attach_domain()
747 sched_domain_debug(sd, cpu); in cpu_attach_domain()
752 dirty_sched_domain_sysctl(cpu); in cpu_attach_domain()
755 update_top_cache_domain(cpu); in cpu_attach_domain()
928 build_group_from_child_sched_domain(struct sched_domain *sd, int cpu) in build_group_from_child_sched_domain() argument
934 GFP_KERNEL, cpu_to_node(cpu)); in build_group_from_child_sched_domain()
957 int cpu; in init_overlap_sched_group() local
960 cpu = cpumask_first(mask); in init_overlap_sched_group()
962 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in init_overlap_sched_group()
1005 build_overlap_sched_groups(struct sched_domain *sd, int cpu) in build_overlap_sched_groups() argument
1016 for_each_cpu_wrap(i, span, cpu) { in build_overlap_sched_groups()
1072 sg = build_group_from_child_sched_domain(sibling, cpu); in build_overlap_sched_groups()
1170 static struct sched_group *get_group(int cpu, struct sd_data *sdd) in get_group() argument
1172 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in get_group()
1178 cpu = cpumask_first(sched_domain_span(child)); in get_group()
1180 sg = *per_cpu_ptr(sdd->sg, cpu); in get_group()
1181 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in get_group()
1197 cpumask_set_cpu(cpu, sched_group_span(sg)); in get_group()
1198 cpumask_set_cpu(cpu, group_balance_mask(sg)); in get_group()
1216 build_sched_groups(struct sched_domain *sd, int cpu) in build_sched_groups() argument
1229 for_each_cpu_wrap(i, span, cpu) { in build_sched_groups()
1261 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) in init_sched_groups_capacity() argument
1268 int cpu, max_cpu = -1; in init_sched_groups_capacity() local
1275 for_each_cpu(cpu, sched_group_span(sg)) { in init_sched_groups_capacity()
1277 max_cpu = cpu; in init_sched_groups_capacity()
1278 else if (sched_asym_prefer(cpu, max_cpu)) in init_sched_groups_capacity()
1279 max_cpu = cpu; in init_sched_groups_capacity()
1287 if (cpu != group_balance_cpu(sg)) in init_sched_groups_capacity()
1290 update_group_capacity(sd, cpu); in init_sched_groups_capacity()
1350 static inline void asym_cpu_capacity_update_data(int cpu) in asym_cpu_capacity_update_data() argument
1352 unsigned long capacity = arch_scale_cpu_capacity(cpu); in asym_cpu_capacity_update_data()
1366 __cpumask_set_cpu(cpu, cpu_capacity_span(entry)); in asym_cpu_capacity_update_data()
1377 int cpu; in asym_cpu_capacity_scan() local
1382 for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) in asym_cpu_capacity_scan()
1383 asym_cpu_capacity_update_data(cpu); in asym_cpu_capacity_scan()
1482 static void claim_allocations(int cpu, struct sched_domain *sd) in claim_allocations() argument
1486 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); in claim_allocations()
1487 *per_cpu_ptr(sdd->sd, cpu) = NULL; in claim_allocations()
1489 if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) in claim_allocations()
1490 *per_cpu_ptr(sdd->sds, cpu) = NULL; in claim_allocations()
1492 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) in claim_allocations()
1493 *per_cpu_ptr(sdd->sg, cpu) = NULL; in claim_allocations()
1495 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) in claim_allocations()
1496 *per_cpu_ptr(sdd->sgc, cpu) = NULL; in claim_allocations()
1535 struct sched_domain *child, int cpu) in sd_init() argument
1538 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in sd_init()
1549 sd_weight = cpumask_weight(tl->mask(cpu)); in sd_init()
1589 cpumask_and(sd_span, cpu_map, tl->mask(cpu)); in sd_init()
1681 static const struct cpumask *sd_numa_mask(int cpu) in sd_numa_mask() argument
1683 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; in sd_numa_mask()
1986 void sched_update_numa(int cpu, bool online) in sched_update_numa() argument
1990 node = cpu_to_node(cpu); in sched_update_numa()
2002 void sched_domains_numa_masks_set(unsigned int cpu) in sched_domains_numa_masks_set() argument
2004 int node = cpu_to_node(cpu); in sched_domains_numa_masks_set()
2014 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); in sched_domains_numa_masks_set()
2019 void sched_domains_numa_masks_clear(unsigned int cpu) in sched_domains_numa_masks_clear() argument
2026 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); in sched_domains_numa_masks_clear()
2039 int sched_numa_find_closest(const struct cpumask *cpus, int cpu) in sched_numa_find_closest() argument
2041 int i, j = cpu_to_node(cpu), found = nr_cpu_ids; in sched_numa_find_closest()
2051 cpu = cpumask_any_and(cpus, masks[i][j]); in sched_numa_find_closest()
2052 if (cpu < nr_cpu_ids) { in sched_numa_find_closest()
2053 found = cpu; in sched_numa_find_closest()
2172 struct sched_domain *child, int cpu) in build_sched_domain() argument
2174 struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu); in build_sched_domain()
2205 const struct cpumask *cpu_map, int cpu) in topology_span_sane() argument
2220 if (i == cpu) in topology_span_sane()
2228 if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) && in topology_span_sane()
2229 cpumask_intersects(tl->mask(cpu), tl->mask(i))) in topology_span_sane()
2474 unsigned int cpu = cpumask_any(cpu_map); in detach_destroy_domains() local
2477 if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu))) in detach_destroy_domains()
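
The references above cluster around two recurring patterns: testing, setting, and scanning bits in a cpumask (cpumask_test_cpu, cpumask_set_cpu, cpumask_first, for_each_cpu) and dereferencing per-CPU scheduler state (per_cpu, per_cpu_ptr, rcu_assign_pointer). Below is a minimal userspace sketch of the cpumask side only; the names (toy_cpumask, toy_test_cpu, and so on) are invented for illustration and are not the kernel API, which lives in include/linux/cpumask.h.

/*
 * Hypothetical userspace analogy of the cpumask test/set/first pattern
 * that recurs in the listing above. Not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_NR_CPUS 8

struct toy_cpumask {
	unsigned long bits;		/* one bit per CPU, CPU0 = bit 0 */
};

static bool toy_test_cpu(int cpu, const struct toy_cpumask *mask)
{
	return mask->bits & (1UL << cpu);
}

static void toy_set_cpu(int cpu, struct toy_cpumask *mask)
{
	mask->bits |= 1UL << cpu;
}

static int toy_first_cpu(const struct toy_cpumask *mask)
{
	for (int cpu = 0; cpu < TOY_NR_CPUS; cpu++)
		if (toy_test_cpu(cpu, mask))
			return cpu;
	return TOY_NR_CPUS;		/* "no CPU found", mirrors cpu >= nr_cpu_ids */
}

int main(void)
{
	struct toy_cpumask span = { 0 };

	/* Analogous to cpumask_set_cpu(rq->cpu, rd->span) in rq_attach_root(). */
	toy_set_cpu(2, &span);
	toy_set_cpu(5, &span);

	/* Analogous to the sanity check in sched_domain_debug_one(). */
	if (!toy_test_cpu(3, &span))
		printf("ERROR: span does not contain CPU%d\n", 3);

	/* Analogous to cpu = cpumask_first(cpu_map) in build_perf_domains(). */
	printf("first CPU in span: %d\n", toy_first_cpu(&span));
	return 0;
}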