Lines Matching refs:cpu_map

7156 const struct cpumask *cpu_map, in init_sched_build_groups() argument
7157 int (*group_fn)(int cpu, const struct cpumask *cpu_map, in init_sched_build_groups()
7169 int group = group_fn(i, cpu_map, &sg, tmpmask); in init_sched_build_groups()
7179 if (group_fn(j, cpu_map, NULL, tmpmask) != group) in init_sched_build_groups()
7291 cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, in cpu_to_cpu_group() argument
7310 cpu_to_core_group(int cpu, const struct cpumask *cpu_map, in cpu_to_core_group() argument
7315 cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); in cpu_to_core_group()
7323 cpu_to_core_group(int cpu, const struct cpumask *cpu_map, in cpu_to_core_group() argument
7336 cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, in cpu_to_phys_group() argument
7341 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); in cpu_to_phys_group()
7344 cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); in cpu_to_phys_group()
7366 static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, in cpu_to_allnodes_group() argument
7372 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map); in cpu_to_allnodes_group()
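The cpu_to_*_group() matches above (7291-7372) all follow one pattern: intersect a per-CPU topology mask (SMT siblings, core group, or node mask) with cpu_map, and let the first CPU of the intersection name the group. Below is a minimal user-space sketch of that pattern, assuming at most 64 CPUs and modeling cpumasks as uint64_t; the sibling_map contents and the demo in main() are hypothetical, and only the mask-and-pick-first logic mirrors the matched lines.

/* Model of the cpu_to_*_group() pattern: AND a topology mask with
 * cpu_map (cf. lines 7315, 7341, 7344, 7372), then use the first CPU
 * of the result as the group id. Illustration only, not kernel code. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t cpumask_t;                /* bit i == CPU i */

static int cpumask_first(cpumask_t m)
{
	return m ? __builtin_ctzll(m) : 64;    /* 64 == "no CPU" */
}

static cpumask_t sibling_map[64];          /* hypothetical SMT-sibling masks */

/* Analog of cpu_to_core_group(): which group does @cpu fall into
 * when the domain build is restricted to @cpu_map? */
static int cpu_to_core_group(int cpu, cpumask_t cpu_map)
{
	cpumask_t mask = sibling_map[cpu] & cpu_map;   /* cpumask_and() */
	return cpumask_first(mask);
}

int main(void)
{
	/* four CPUs, SMT pairs {0,1} and {2,3}; CPU 2 left out of cpu_map */
	sibling_map[0] = sibling_map[1] = 0x3;
	sibling_map[2] = sibling_map[3] = 0xc;
	cpumask_t cpu_map = 0xb;

	for (int cpu = 0; cpu < 4; cpu++)
		if (cpu_map & (1ull << cpu))
			printf("cpu %d -> group %d\n",
			       cpu, cpu_to_core_group(cpu, cpu_map));
	return 0;
}

Running this prints groups 0, 0 and 3: with CPU 2 excluded from cpu_map, CPU 3's group collapses to itself, which is why each helper re-ANDs with cpu_map instead of using the raw topology mask.
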
7409 static void free_sched_groups(const struct cpumask *cpu_map, in free_sched_groups() argument
7414 for_each_cpu(cpu, cpu_map) { in free_sched_groups()
7424 cpumask_and(nodemask, cpumask_of_node(i), cpu_map); in free_sched_groups()
7443 static void free_sched_groups(const struct cpumask *cpu_map, in free_sched_groups() argument
7574 static int __build_sched_domains(const struct cpumask *cpu_map, in __build_sched_domains() argument
7624 sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes; in __build_sched_domains()
7630 for_each_cpu(i, cpu_map) { in __build_sched_domains()
7633 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map); in __build_sched_domains()
7636 if (cpumask_weight(cpu_map) > in __build_sched_domains()
7641 cpumask_copy(sched_domain_span(sd), cpu_map); in __build_sched_domains()
7642 cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); in __build_sched_domains()
7656 sched_domain_span(sd), cpu_map); in __build_sched_domains()
7667 cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask); in __build_sched_domains()
7674 cpumask_and(sched_domain_span(sd), cpu_map, in __build_sched_domains()
7678 cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); in __build_sched_domains()
7687 &per_cpu(cpu_sibling_map, i), cpu_map); in __build_sched_domains()
7690 cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask); in __build_sched_domains()
7696 for_each_cpu(i, cpu_map) { in __build_sched_domains()
7698 &per_cpu(cpu_sibling_map, i), cpu_map); in __build_sched_domains()
7702 init_sched_build_groups(this_sibling_map, cpu_map, in __build_sched_domains()
7710 for_each_cpu(i, cpu_map) { in __build_sched_domains()
7711 cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map); in __build_sched_domains()
7715 init_sched_build_groups(this_core_map, cpu_map, in __build_sched_domains()
7723 cpumask_and(nodemask, cpumask_of_node(i), cpu_map); in __build_sched_domains()
7727 init_sched_build_groups(nodemask, cpu_map, in __build_sched_domains()
7735 init_sched_build_groups(cpu_map, cpu_map, in __build_sched_domains()
7746 cpumask_and(nodemask, cpumask_of_node(i), cpu_map); in __build_sched_domains()
7753 cpumask_and(domainspan, domainspan, cpu_map); in __build_sched_domains()
7779 cpumask_and(tmpmask, notcovered, cpu_map); in __build_sched_domains()
7808 for_each_cpu(i, cpu_map) { in __build_sched_domains()
7815 for_each_cpu(i, cpu_map) { in __build_sched_domains()
7822 for_each_cpu(i, cpu_map) { in __build_sched_domains()
7835 cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg, in __build_sched_domains()
7842 for_each_cpu(i, cpu_map) { in __build_sched_domains()
7885 free_sched_groups(cpu_map, tmpmask); in __build_sched_domains()
7891 static int build_sched_domains(const struct cpumask *cpu_map) in build_sched_domains() argument
7893 return __build_sched_domains(cpu_map, NULL); in build_sched_domains()
7923 static int arch_init_sched_domains(const struct cpumask *cpu_map) in arch_init_sched_domains() argument
7932 cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map); in arch_init_sched_domains()
7940 static void arch_destroy_sched_domains(const struct cpumask *cpu_map, in arch_destroy_sched_domains() argument
7943 free_sched_groups(cpu_map, tmpmask); in arch_destroy_sched_domains()
7950 static void detach_destroy_domains(const struct cpumask *cpu_map) in detach_destroy_domains() argument
7956 for_each_cpu(i, cpu_map) in detach_destroy_domains()
7959 arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask)); in detach_destroy_domains()
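
For context on the init_sched_build_groups() matches at the top of this listing (7169 asks group_fn for each CPU's group, 7179 tests whether another CPU maps to the same group), here is a user-space sketch of that flow under the same 64-CPU/uint64_t model as above; build_groups() and pair_group() are hypothetical stand-ins, and only the loop shape comes from the matched lines.

/* Model of the init_sched_build_groups() flow: walk the span, ask
 * group_fn() which group each CPU belongs to, skip CPUs already
 * covered, and collect every CPU that maps to the same group. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t cpumask_t;
typedef int (*group_fn_t)(int cpu, cpumask_t cpu_map);

/* toy group_fn: CPUs sharing cpu/2 form one group */
static int pair_group(int cpu, cpumask_t cpu_map)
{
	(void)cpu_map;
	return cpu / 2;
}

static void build_groups(cpumask_t span, cpumask_t cpu_map, group_fn_t fn)
{
	cpumask_t covered = 0;

	for (int i = 0; i < 64; i++) {
		if (!(span & (1ull << i)) || (covered & (1ull << i)))
			continue;                      /* already grouped */
		int group = fn(i, cpu_map);            /* cf. line 7169 */
		cpumask_t members = 0;
		for (int j = 0; j < 64; j++) {
			if (!(span & (1ull << j)))
				continue;
			if (fn(j, cpu_map) != group)   /* cf. line 7179 */
				continue;
			members |= 1ull << j;
		}
		covered |= members;
		printf("group %d: mask %#llx\n",
		       group, (unsigned long long)members);
	}
}

int main(void)
{
	cpumask_t map = 0xf;                   /* CPUs 0-3 */
	build_groups(map, map, pair_group);    /* span == cpu_map here */
	return 0;
}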