
Lines Matching refs:cpu_map

309 static void perf_domain_debug(const struct cpumask *cpu_map,  in perf_domain_debug()  argument
315 printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map)); in perf_domain_debug()
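
These references appear to come from the scheduler topology code (kernel/sched/topology.c). perf_domain_debug() prints the root domain's CPUs with the "%*pbl" bitmap-list printk format, fed by the cpumask_pr_args() macro. A minimal sketch of that pattern, assuming kernel context (the helper name is invented):

    #include <linux/kernel.h>
    #include <linux/cpumask.h>

    /* Hypothetical helper: print a cpumask as a range list, e.g. "0-3,8".
     * cpumask_pr_args() expands to nr_cpu_ids plus the mask bits, which is
     * exactly what the "%*pbl" format consumes. */
    static void demo_print_map(const struct cpumask *cpu_map)
    {
            printk(KERN_DEBUG "root_domain %*pbl:\n", cpumask_pr_args(cpu_map));
    }
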
375 static bool build_perf_domains(const struct cpumask *cpu_map) in build_perf_domains() argument
377 int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map); in build_perf_domains()
379 int cpu = cpumask_first(cpu_map); in build_perf_domains()
394 cpumask_pr_args(cpu_map)); in build_perf_domains()
402 cpumask_pr_args(cpu_map)); in build_perf_domains()
409 cpumask_pr_args(cpu_map)); in build_perf_domains()
414 for_each_cpu(i, cpu_map) { in build_perf_domains()
437 cpumask_pr_args(cpu_map)); in build_perf_domains()
441 perf_domain_debug(cpu_map, pd); in build_perf_domains()
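
Within build_perf_domains(), the map is sized with cpumask_weight(), a representative CPU is taken with cpumask_first(), and the CPUs are visited with for_each_cpu(). An illustrative sketch of that iteration shape (not the real EAS perf-domain construction):

    #include <linux/cpumask.h>
    #include <linux/bug.h>

    static int demo_walk_map(const struct cpumask *cpu_map)
    {
            int i, seen = 0;
            int nr_cpus = cpumask_weight(cpu_map);  /* population count */
            int first = cpumask_first(cpu_map);     /* lowest set CPU */

            for_each_cpu(i, cpu_map)                /* visit each set bit */
                    seen++;

            WARN_ON(seen != nr_cpus);               /* weight == bits visited */
            return first;
    }
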
1318 const struct cpumask *cpu_map) in asym_cpu_capacity_classify() argument
1332 else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry))) in asym_cpu_capacity_classify()
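
asym_cpu_capacity_classify() decides how the capacity span of each recorded entry relates to the cpu_map being built, using cpumask_intersects() for the overlap test. A hedged sketch of that classification (the enum and function names are invented; only the cpumask calls are real API):

    #include <linux/cpumask.h>

    enum demo_span_match { SPAN_NONE, SPAN_PARTIAL, SPAN_FULL };

    static enum demo_span_match demo_classify(const struct cpumask *cpu_map,
                                              const struct cpumask *span)
    {
            if (cpumask_subset(span, cpu_map))
                    return SPAN_FULL;       /* whole span inside the map */
            if (cpumask_intersects(cpu_map, span))
                    return SPAN_PARTIAL;    /* some, but not all, CPUs */
            return SPAN_NONE;               /* disjoint */
    }
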
1438 static void __sdt_free(const struct cpumask *cpu_map);
1439 static int __sdt_alloc(const struct cpumask *cpu_map);
1442 const struct cpumask *cpu_map) in __free_domain_allocs() argument
1453 __sdt_free(cpu_map); in __free_domain_allocs()
1461 __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) in __visit_domain_allocation_hell() argument
1465 if (__sdt_alloc(cpu_map)) in __visit_domain_allocation_hell()
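
__visit_domain_allocation_hell() and __free_domain_allocs() pair setup and rollback over the same cpu_map: the teardown path is a switch that falls through from however far allocation got. An illustrative sketch of that staged-teardown shape (all names invented):

    #include <linux/slab.h>
    #include <linux/compiler.h>

    enum demo_stage { st_full, st_storage, st_none };

    struct demo_data {
            void *late;     /* allocated second */
            void *early;    /* allocated first */
    };

    /* Free from the stage reached downward, mirroring __free_domain_allocs(). */
    static void demo_teardown(struct demo_data *d, enum demo_stage got)
    {
            switch (got) {
            case st_full:
                    kfree(d->late);
                    fallthrough;
            case st_storage:
                    kfree(d->early);  /* upstream: __sdt_free(cpu_map) */
                    fallthrough;
            case st_none:
                    break;
            }
    }
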
1534 const struct cpumask *cpu_map, in sd_init() argument
1589 cpumask_and(sd_span, cpu_map, tl->mask(cpu)); in sd_init()
1592 sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map); in sd_init()
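
In sd_init(), a domain's span is the topology level's mask for the given CPU clipped to the map under construction, and the resulting span is then classified for asymmetric capacity. A minimal sketch of the clipping step (level_mask stands in for tl->mask(cpu)):

    #include <linux/cpumask.h>

    static void demo_span(struct cpumask *sd_span,
                          const struct cpumask *cpu_map,
                          const struct cpumask *level_mask)
    {
            /* keep only CPUs present in both masks */
            cpumask_and(sd_span, cpu_map, level_mask);
    }
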
2065 static int __sdt_alloc(const struct cpumask *cpu_map) in __sdt_alloc() argument
2089 for_each_cpu(j, cpu_map) { in __sdt_alloc()
2134 static void __sdt_free(const struct cpumask *cpu_map) in __sdt_free() argument
2142 for_each_cpu(j, cpu_map) { in __sdt_free()
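
__sdt_alloc() and __sdt_free() walk the same cpu_map, so teardown visits exactly the CPUs that setup touched, with per-CPU memory placed on each CPU's node. An illustrative sketch of that pattern, assuming kernel context (the buffer and names are made up):

    #include <linux/cpumask.h>
    #include <linux/slab.h>
    #include <linux/topology.h>
    #include <linux/errno.h>

    static void *demo_buf[NR_CPUS];

    static void demo_free(const struct cpumask *cpu_map)
    {
            int j;

            for_each_cpu(j, cpu_map) {
                    kfree(demo_buf[j]);
                    demo_buf[j] = NULL;
            }
    }

    static int demo_alloc(const struct cpumask *cpu_map)
    {
            int j;

            for_each_cpu(j, cpu_map) {
                    /* node-local allocation for CPU j */
                    demo_buf[j] = kzalloc_node(64, GFP_KERNEL, cpu_to_node(j));
                    if (!demo_buf[j]) {
                            demo_free(cpu_map);  /* roll back the same walk */
                            return -ENOMEM;
                    }
            }
            return 0;
    }
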
2171 const struct cpumask *cpu_map, struct sched_domain_attr *attr, in build_sched_domain() argument
2174 struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu); in build_sched_domain()
2205 const struct cpumask *cpu_map, int cpu) in topology_span_sane() argument
2219 for_each_cpu(i, cpu_map) { in topology_span_sane()
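
topology_span_sane() enforces that, within one topology level, the masks of any two CPUs in cpu_map are either identical or completely disjoint; a partial overlap means the topology description is broken. The invariant, sketched as a pairwise check:

    #include <linux/cpumask.h>

    /* Sanity rule: equal or disjoint, never partially overlapping. */
    static bool demo_spans_sane(const struct cpumask *mask_a,
                                const struct cpumask *mask_b)
    {
            return cpumask_equal(mask_a, mask_b) ||
                   !cpumask_intersects(mask_a, mask_b);
    }
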
2241 build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr) in build_sched_domains() argument
2250 if (WARN_ON(cpumask_empty(cpu_map))) in build_sched_domains()
2253 alloc_state = __visit_domain_allocation_hell(&d, cpu_map); in build_sched_domains()
2258 for_each_cpu(i, cpu_map) { in build_sched_domains()
2264 if (WARN_ON(!topology_span_sane(tl, cpu_map, i))) in build_sched_domains()
2267 sd = build_sched_domain(tl, cpu_map, attr, sd, i); in build_sched_domains()
2275 if (cpumask_equal(cpu_map, sched_domain_span(sd))) in build_sched_domains()
2281 for_each_cpu(i, cpu_map) { in build_sched_domains()
2298 for_each_cpu(i, cpu_map) { in build_sched_domains()
2354 if (!cpumask_test_cpu(i, cpu_map)) in build_sched_domains()
2365 for_each_cpu(i, cpu_map) { in build_sched_domains()
2382 cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); in build_sched_domains()
2388 __free_domain_allocs(&d, alloc_state, cpu_map); in build_sched_domains()
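
build_sched_domains() ties the pieces together: bail out on an empty map, allocate scratch state, then for every CPU stack one domain per topology level, stopping once a level's span already covers the whole cpu_map. A skeleton of that flow with invented level helpers (the real code also attaches the result to each runqueue):

    #include <linux/cpumask.h>
    #include <linux/bug.h>
    #include <linux/errno.h>

    struct demo_level { const struct cpumask *(*mask)(int cpu); };
    extern struct demo_level demo_levels[];
    extern int demo_nr_levels;

    static int demo_build(const struct cpumask *cpu_map)
    {
            static struct cpumask span;  /* sketch only: not thread-safe */
            int i, l;

            if (WARN_ON(cpumask_empty(cpu_map)))
                    return -EINVAL;

            for_each_cpu(i, cpu_map) {
                    for (l = 0; l < demo_nr_levels; l++) {
                            /* clip this level's mask to the map being built */
                            cpumask_and(&span, cpu_map, demo_levels[l].mask(i));
                            if (cpumask_equal(cpu_map, &span))
                                    break;  /* widest useful level reached */
                    }
            }
            return 0;
    }
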
2448 int sched_init_domains(const struct cpumask *cpu_map) in sched_init_domains() argument
2462 cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_TYPE_DOMAIN)); in sched_init_domains()
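
sched_init_domains() restricts the initial domain span to housekeeping CPUs, so CPUs isolated via the isolcpus= boot option never join a scheduling domain. The intersection step, sketched with a caller-provided destination mask:

    #include <linux/cpumask.h>
    #include <linux/sched/isolation.h>

    static void demo_init_span(struct cpumask *doms,
                               const struct cpumask *cpu_map)
    {
            /* keep only CPUs that are both in the map and not isolated */
            cpumask_and(doms, cpu_map, housekeeping_cpumask(HK_TYPE_DOMAIN));
    }
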
2472 static void detach_destroy_domains(const struct cpumask *cpu_map) in detach_destroy_domains() argument
2474 unsigned int cpu = cpumask_any(cpu_map); in detach_destroy_domains()
2481 for_each_cpu(i, cpu_map) in detach_destroy_domains()
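
detach_destroy_domains() picks one representative CPU with cpumask_any() (upstream it is used to inspect that CPU's domain flags) and then walks every CPU in the map to detach it. A hedged sketch of that teardown shape (detach_one() stands in for the real cpu_attach_domain() call):

    #include <linux/cpumask.h>

    static void detach_one(int cpu);  /* hypothetical per-CPU detach */

    static void demo_detach_all(const struct cpumask *cpu_map)
    {
            unsigned int any = cpumask_any(cpu_map);  /* representative CPU */
            int i;

            (void)any;  /* upstream: used to read this CPU's domain flags */

            for_each_cpu(i, cpu_map)
                    detach_one(i);
    }
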