Lines Matching refs:cpu

28 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,  in sched_domain_debug_one()  argument
47 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { in sched_domain_debug_one()
48 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); in sched_domain_debug_one()
50 if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) { in sched_domain_debug_one()
51 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); in sched_domain_debug_one()
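The two checks above use cpumask_test_cpu() to confirm that the CPU being debugged is a member of the domain's span and of its group's span. A minimal sketch of the same membership check, with a hypothetical validate_cpu_in_mask() helper:

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Hypothetical helper: warn when @cpu is not a member of @mask. */
static bool validate_cpu_in_mask(int cpu, const struct cpumask *mask,
                                 const char *what)
{
        if (!cpumask_test_cpu(cpu, mask)) {
                printk(KERN_ERR "ERROR: %s does not contain CPU%d\n",
                       what, cpu);
                return false;
        }
        return true;
}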
115 static void sched_domain_debug(struct sched_domain *sd, int cpu) in sched_domain_debug() argument
123 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); in sched_domain_debug()
127 printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu); in sched_domain_debug()
130 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) in sched_domain_debug()
141 # define sched_domain_debug(sd, cpu) do { } while (0) argument
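Line 141 is the stub used when the debug code is compiled out: the call site expands to an empty do { } while (0) statement, which stays a single statement even inside un-braced if/else bodies. The same idiom in isolation, with a hypothetical my_debug_hook() and config option:

#ifdef CONFIG_MY_DEBUG
static void my_debug_hook(int cpu);
#else
/* Expands to one empty statement, so callers need no #ifdef guards. */
# define my_debug_hook(cpu) do { } while (0)
#endif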
246 static struct perf_domain *find_pd(struct perf_domain *pd, int cpu) in find_pd() argument
249 if (cpumask_test_cpu(cpu, perf_domain_span(pd))) in find_pd()
257 static struct perf_domain *pd_init(int cpu) in pd_init() argument
259 struct em_perf_domain *obj = em_cpu_get(cpu); in pd_init()
264 pr_info("%s: no EM found for CPU%d\n", __func__, cpu); in pd_init()
346 int cpu = cpumask_first(cpu_map); in build_perf_domains() local
347 struct root_domain *rd = cpu_rq(cpu)->rd; in build_perf_domains()
355 if (!per_cpu(sd_asym_cpucapacity, cpu)) { in build_perf_domains()
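find_pd() tests each performance domain's span for the CPU, pd_init() fetches the CPU's energy model with em_cpu_get(), and build_perf_domains() starts from the first CPU in the map and gives up if that CPU has no cached sd_asym_cpucapacity pointer. A sketch of the lookup loop, assuming the domains are linked through ->next and that perf_domain_span() returns the cpumask covered by the domain:

#include <linux/cpumask.h>

static struct perf_domain *find_pd_sketch(struct perf_domain *pd, int cpu)
{
        /* Walk the list until a domain whose span contains @cpu is found. */
        while (pd) {
                if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
                        return pd;
                pd = pd->next;
        }
        return NULL;
}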
450 if (cpumask_test_cpu(rq->cpu, old_rd->online)) in rq_attach_root()
453 cpumask_clear_cpu(rq->cpu, old_rd->span); in rq_attach_root()
467 cpumask_set_cpu(rq->cpu, rd->span); in rq_attach_root()
468 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) in rq_attach_root()
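rq_attach_root() migrates a runqueue's CPU between root domains: the CPU is cleared from the old span, set in the new one, and only treated as online if it is in cpu_active_mask. The same cpumask sequence on hypothetical old_span/new_span masks:

#include <linux/cpumask.h>
#include <linux/printk.h>

static void move_cpu_between_spans(int cpu, struct cpumask *old_span,
                                   struct cpumask *new_span)
{
        cpumask_clear_cpu(cpu, old_span);       /* leave the old domain */
        cpumask_set_cpu(cpu, new_span);         /* join the new domain  */

        if (cpumask_test_cpu(cpu, cpu_active_mask))
                pr_debug("CPU%d is active in its new span\n", cpu);
}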
630 static void update_top_cache_domain(int cpu) in update_top_cache_domain() argument
634 int id = cpu; in update_top_cache_domain()
637 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); in update_top_cache_domain()
644 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); in update_top_cache_domain()
645 per_cpu(sd_llc_size, cpu) = size; in update_top_cache_domain()
646 per_cpu(sd_llc_id, cpu) = id; in update_top_cache_domain()
647 rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds); in update_top_cache_domain()
649 sd = lowest_flag_domain(cpu, SD_NUMA); in update_top_cache_domain()
650 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); in update_top_cache_domain()
652 sd = highest_flag_domain(cpu, SD_ASYM_PACKING); in update_top_cache_domain()
653 rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd); in update_top_cache_domain()
655 sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY); in update_top_cache_domain()
656 rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd); in update_top_cache_domain()
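update_top_cache_domain() publishes per-CPU shortcuts (sd_llc, sd_llc_size, sd_llc_id, sd_numa, sd_asym_packing, sd_asym_cpucapacity) with rcu_assign_pointer() so hot paths need not walk the hierarchy. A reader-side sketch, assuming the scheduler's per-CPU declarations of sd_llc and sd_llc_size are in scope:

#include <linux/percpu.h>
#include <linux/rcupdate.h>

static int llc_size_of(int cpu)
{
        struct sched_domain *sd;
        int size = 1;

        rcu_read_lock();
        /* Pairs with the rcu_assign_pointer() in update_top_cache_domain(). */
        sd = rcu_dereference(per_cpu(sd_llc, cpu));
        if (sd)
                size = per_cpu(sd_llc_size, cpu);
        rcu_read_unlock();

        return size;
}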
664 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) in cpu_attach_domain() argument
666 struct rq *rq = cpu_rq(cpu); in cpu_attach_domain()
699 sched_domain_debug(sd, cpu); in cpu_attach_domain()
704 dirty_sched_domain_sysctl(cpu); in cpu_attach_domain()
707 update_top_cache_domain(cpu); in cpu_attach_domain()
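cpu_attach_domain() installs the freshly built hierarchy on the CPU's runqueue, runs the debug checks, and refreshes the cached pointers. A sketch of walking the attached hierarchy afterwards, using the scheduler-internal for_each_domain() iterator (which requires rcu_read_lock() and only compiles inside kernel/sched):

#include <linux/rcupdate.h>

static int count_domain_levels(int cpu)
{
        struct sched_domain *sd;
        int levels = 0;

        rcu_read_lock();
        for_each_domain(cpu, sd)        /* base domain, then its parents */
                levels++;
        rcu_read_unlock();

        return levels;
}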
880 build_group_from_child_sched_domain(struct sched_domain *sd, int cpu) in build_group_from_child_sched_domain() argument
886 GFP_KERNEL, cpu_to_node(cpu)); in build_group_from_child_sched_domain()
907 int cpu; in init_overlap_sched_group() local
910 cpu = cpumask_first_and(sched_group_span(sg), mask); in init_overlap_sched_group()
912 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in init_overlap_sched_group()
930 build_overlap_sched_groups(struct sched_domain *sd, int cpu) in build_overlap_sched_groups() argument
941 for_each_cpu_wrap(i, span, cpu) { in build_overlap_sched_groups()
962 sg = build_group_from_child_sched_domain(sibling, cpu); in build_overlap_sched_groups()
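build_group_from_child_sched_domain() allocates the group NUMA-local to the building CPU, init_overlap_sched_group() picks the first CPU common to the group span and the balance mask, and build_overlap_sched_groups() walks the span starting at the building CPU. A sketch of the NUMA-local allocation step, with a hypothetical my_group structure:

#include <linux/slab.h>
#include <linux/topology.h>

struct my_group {                       /* hypothetical stand-in object */
        int id;
};

/* Allocate near @cpu, as kzalloc_node(..., cpu_to_node(cpu)) does above. */
static struct my_group *alloc_group_near(int cpu)
{
        return kzalloc_node(sizeof(struct my_group), GFP_KERNEL,
                            cpu_to_node(cpu));
}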
1060 static struct sched_group *get_group(int cpu, struct sd_data *sdd) in get_group() argument
1062 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in get_group()
1068 cpu = cpumask_first(sched_domain_span(child)); in get_group()
1070 sg = *per_cpu_ptr(sdd->sg, cpu); in get_group()
1071 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in get_group()
1086 cpumask_set_cpu(cpu, sched_group_span(sg)); in get_group()
1087 cpumask_set_cpu(cpu, group_balance_mask(sg)); in get_group()
1105 build_sched_groups(struct sched_domain *sd, int cpu) in build_sched_groups() argument
1118 for_each_cpu_wrap(i, span, cpu) { in build_sched_groups()
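get_group() takes the first CPU of the child domain's span as the group's representative, and build_sched_groups() visits the domain span with for_each_cpu_wrap() starting at the building CPU, so each CPU sees its own group first. The traversal in isolation:

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Visit every CPU in @mask exactly once, starting at @start and wrapping. */
static void visit_wrapped(const struct cpumask *mask, int start)
{
        int i;

        for_each_cpu_wrap(i, mask, start)
                pr_debug("visiting CPU%d\n", i);
}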
1150 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) in init_sched_groups_capacity() argument
1157 int cpu, max_cpu = -1; in init_sched_groups_capacity() local
1164 for_each_cpu(cpu, sched_group_span(sg)) { in init_sched_groups_capacity()
1166 max_cpu = cpu; in init_sched_groups_capacity()
1167 else if (sched_asym_prefer(cpu, max_cpu)) in init_sched_groups_capacity()
1168 max_cpu = cpu; in init_sched_groups_capacity()
1176 if (cpu != group_balance_cpu(sg)) in init_sched_groups_capacity()
1179 update_group_capacity(sd, cpu); in init_sched_groups_capacity()
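Note that the cpu declared at line 1157 is a loop-local variable shadowing the function parameter: it scans each group's span for the CPU that sched_asym_prefer() ranks highest, and only the group's balance CPU goes on to call update_group_capacity(). The selection scan with a hypothetical prefer() comparator:

#include <linux/cpumask.h>

/* Return the CPU in @mask ranked highest by prefer(), or -1 if empty. */
static int pick_preferred_cpu(const struct cpumask *mask,
                              bool (*prefer)(int a, int b))
{
        int cpu, max_cpu = -1;

        for_each_cpu(cpu, mask) {
                if (max_cpu < 0 || prefer(cpu, max_cpu))
                        max_cpu = cpu;
        }
        return max_cpu;
}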
1264 static void claim_allocations(int cpu, struct sched_domain *sd) in claim_allocations() argument
1268 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); in claim_allocations()
1269 *per_cpu_ptr(sdd->sd, cpu) = NULL; in claim_allocations()
1271 if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) in claim_allocations()
1272 *per_cpu_ptr(sdd->sds, cpu) = NULL; in claim_allocations()
1274 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) in claim_allocations()
1275 *per_cpu_ptr(sdd->sg, cpu) = NULL; in claim_allocations()
1277 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) in claim_allocations()
1278 *per_cpu_ptr(sdd->sgc, cpu) = NULL; in claim_allocations()
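claim_allocations() NULLs the per-CPU slots whose objects the new topology has taken a reference on, so the later bulk free of sd_data skips them. The same pattern on a hypothetical refcounted object:

#include <linux/atomic.h>
#include <linux/percpu.h>

struct obj {                            /* hypothetical refcounted object */
        atomic_t ref;
};

/* Detach the object for @cpu from the per-CPU table if it is referenced,
 * so a subsequent bulk free will not release it. */
static void claim_if_referenced(struct obj *__percpu *slot, int cpu)
{
        if (atomic_read(&(*per_cpu_ptr(slot, cpu))->ref))
                *per_cpu_ptr(slot, cpu) = NULL;
}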
1320 struct sched_domain *child, int dflags, int cpu) in sd_init() argument
1323 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in sd_init()
1333 sd_weight = cpumask_weight(tl->mask(cpu)); in sd_init()
1376 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); in sd_init()
1465 static const struct cpumask *sd_numa_mask(int cpu) in sd_numa_mask() argument
1467 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; in sd_numa_mask()
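sd_init() sizes the new domain from the topology level's mask for the CPU and intersects it with the cpu_map being built; sd_numa_mask() maps a CPU to its node's precomputed mask for the current NUMA level. The intersection step on plain masks:

#include <linux/cpumask.h>

/* Restrict @level_mask to @cpu_map, storing the result in @span, and
 * return the resulting weight (the cpumask_and() step from sd_init()). */
static unsigned int span_weight(struct cpumask *span,
                                const struct cpumask *level_mask,
                                const struct cpumask *cpu_map)
{
        cpumask_and(span, cpu_map, level_mask);
        return cpumask_weight(span);
}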
1708 void sched_domains_numa_masks_set(unsigned int cpu) in sched_domains_numa_masks_set() argument
1710 int node = cpu_to_node(cpu); in sched_domains_numa_masks_set()
1716 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); in sched_domains_numa_masks_set()
1721 void sched_domains_numa_masks_clear(unsigned int cpu) in sched_domains_numa_masks_clear() argument
1727 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); in sched_domains_numa_masks_clear()
1739 int sched_numa_find_closest(const struct cpumask *cpus, int cpu) in sched_numa_find_closest() argument
1741 int i, j = cpu_to_node(cpu); in sched_numa_find_closest()
1744 cpu = cpumask_any_and(cpus, sched_domains_numa_masks[i][j]); in sched_numa_find_closest()
1745 if (cpu < nr_cpu_ids) in sched_numa_find_closest()
1746 return cpu; in sched_numa_find_closest()
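sched_domains_numa_masks_set()/_clear() keep the per-level node masks in sync on CPU hotplug, and sched_numa_find_closest() probes those masks level by level, returning the first CPU of the candidate set within reach (cpumask_any_and() yields nr_cpu_ids when the intersection is empty). The search loop over a hypothetical array of widening masks:

#include <linux/cpumask.h>

static int find_closest_in(const struct cpumask *candidates,
                           const struct cpumask **masks, int nr)
{
        int i, cpu;

        for (i = 0; i < nr; i++) {
                cpu = cpumask_any_and(candidates, masks[i]);
                if (cpu < nr_cpu_ids)
                        return cpu;
        }
        return nr_cpu_ids;      /* nothing reachable */
}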
1860 struct sched_domain *child, int dflags, int cpu) in build_sched_domain() argument
1862 struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu); in build_sched_domain()
2118 unsigned int cpu = cpumask_any(cpu_map); in detach_destroy_domains() local
2121 if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu))) in detach_destroy_domains()
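detach_destroy_domains() picks any CPU from the map and inspects its cached sd_asym_cpucapacity pointer with rcu_access_pointer(), which is sufficient because only NULL versus non-NULL matters. A sketch of that check, assuming the scheduler's per-CPU declaration is visible:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>

static bool map_has_asym_cpucapacity(const struct cpumask *cpu_map)
{
        unsigned int cpu = cpumask_any(cpu_map);

        /* Only the NULL-ness is tested, so no rcu_read_lock() is needed. */
        return rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)) != NULL;
}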