/arch/riscv/kernel/ |
D | sys_riscv.c |
     86  const struct cpumask *cpus) in hwprobe_arch_id() argument
     92  for_each_cpu(cpu, cpus) { in hwprobe_arch_id()
    126  const struct cpumask *cpus) in hwprobe_isa_ext0() argument
    145  for_each_cpu(cpu, cpus) { in hwprobe_isa_ext0()
    168  static u64 hwprobe_misaligned(const struct cpumask *cpus) in hwprobe_misaligned() argument
    173  for_each_cpu(cpu, cpus) { in hwprobe_misaligned()
    192  const struct cpumask *cpus) in hwprobe_one_pair() argument
    198  hwprobe_arch_id(pair, cpus); in hwprobe_one_pair()
    211  hwprobe_isa_ext0(pair, cpus); in hwprobe_one_pair()
    215  pair->value = hwprobe_misaligned(cpus); in hwprobe_one_pair()
    [all …]
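The hits above share one pattern: each hwprobe helper walks every CPU in the mask and keeps only the capabilities all of them share, so a reported feature is guaranteed on each CPU. A minimal sketch of that reduction, assuming a hypothetical per-CPU feature bitmap hart_caps[] (the real kernel keeps this state elsewhere):

    #include <linux/cpumask.h>

    /* hart_caps[] is a hypothetical per-CPU feature bitmap, not the
     * kernel's actual data structure. */
    static u64 hart_caps[NR_CPUS];

    static u64 probe_common_caps(const struct cpumask *cpus)
    {
            u64 caps = ~0ULL;
            int cpu;

            /* Keep only the bits set on every CPU in the mask. */
            for_each_cpu(cpu, cpus)
                    caps &= hart_caps[cpu];

            return caps;
    }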
|
/arch/riscv/kernel/vdso/ |
D | hwprobe.c |
     11  size_t cpu_count, unsigned long *cpus,
     16  size_t cpu_count, unsigned long *cpus,
     20  size_t cpu_count, unsigned long *cpus, in __vdso_riscv_hwprobe() argument
     25  bool all_cpus = !cpu_count && !cpus; in __vdso_riscv_hwprobe()
     36  return riscv_hwprobe(pairs, pair_count, cpu_count, cpus, flags); in __vdso_riscv_hwprobe()
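Line 25 shows the convention the vDSO honours: a zero cpu_count with a NULL cpus pointer means "all CPUs". A hedged userspace sketch of querying the syscall directly that way (riscv64 only; struct and key definitions come from <asm/hwprobe.h>):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/hwprobe.h>        /* struct riscv_hwprobe, key constants */

    int main(void)
    {
            struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };

            /* cpu_count == 0 and cpus == NULL: probe across all CPUs. */
            if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0)
                    printf("extension bits: 0x%llx\n",
                           (unsigned long long)pair.value);
            return 0;
    }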
|
/arch/x86/hyperv/ |
D | mmu.c |
     20  static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
     60  static void hyperv_flush_tlb_multi(const struct cpumask *cpus, in hyperv_flush_tlb_multi() argument
     69  trace_hyperv_mmu_flush_tlb_multi(cpus, info); in hyperv_flush_tlb_multi()
     97  if (cpumask_equal(cpus, cpu_present_mask)) { in hyperv_flush_tlb_multi()
    111  cpu = cpumask_last(cpus); in hyperv_flush_tlb_multi()
    116  for_each_cpu(cpu, cpus) { in hyperv_flush_tlb_multi()
    162  status = hyperv_flush_tlb_others_ex(cpus, info); in hyperv_flush_tlb_multi()
    170  native_flush_tlb_multi(cpus, info); in hyperv_flush_tlb_multi()
    173  static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus, in hyperv_flush_tlb_others_ex() argument
    201  nr_bank = cpumask_to_vpset_skip(&flush->hv_vp_set, cpus, in hyperv_flush_tlb_others_ex()
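Line 97 is the interesting branch: when the mask covers every present CPU, the code can take a cheaper broadcast path instead of naming CPUs one by one (and line 170 shows the fall-back to the native flush when the hypercall path fails). A sketch of that shape, not Hyper-V's actual code; flush_all() and flush_one() are hypothetical stand-ins for the hypercalls:

    #include <linux/cpumask.h>

    static void flush_all(void) { }         /* hypothetical broadcast flush */
    static void flush_one(int cpu) { }      /* hypothetical per-CPU flush */

    static void flush_tlb_sketch(const struct cpumask *cpus)
    {
            int cpu;

            /* Full mask: one broadcast request beats a per-CPU list. */
            if (cpumask_equal(cpus, cpu_present_mask)) {
                    flush_all();
                    return;
            }

            for_each_cpu(cpu, cpus)
                    flush_one(cpu);
    }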
|
/arch/x86/include/asm/trace/ |
D | hyperv.h |
     12  TP_PROTO(const struct cpumask *cpus,
     14  TP_ARGS(cpus, info),
     21  TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
     60  TP_PROTO(const struct cpumask *cpus,
     62  TP_ARGS(cpus, vector),
     67  TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
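Both tracepoints record only the mask's weight rather than the mask itself. A sketch of a TRACE_EVENT built the same way; the event name is made up, and the usual include/trace header boilerplate around it is omitted:

    TRACE_EVENT(sketch_cpumask_event,
            TP_PROTO(const struct cpumask *cpus),
            TP_ARGS(cpus),
            TP_STRUCT__entry(
                    __field(unsigned int, ncpus)
            ),
            TP_fast_assign(
                    /* capture the CPU count, not the mask itself */
                    __entry->ncpus = cpumask_weight(cpus);
            ),
            TP_printk("ncpus %u", __entry->ncpus)
    );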
|
/arch/mips/cavium-octeon/ |
D | smp.c |
    143  int cpus; in octeon_smp_setup() local
    162  cpus = 1; in octeon_smp_setup()
    165  set_cpu_possible(cpus, true); in octeon_smp_setup()
    166  set_cpu_present(cpus, true); in octeon_smp_setup()
    167  __cpu_number_map[id] = cpus; in octeon_smp_setup()
    168  __cpu_logical_map[cpus] = id; in octeon_smp_setup()
    169  cpus++; in octeon_smp_setup()
    182  set_cpu_possible(cpus, true); in octeon_smp_setup()
    183  __cpu_number_map[id] = cpus; in octeon_smp_setup()
    184  __cpu_logical_map[cpus] = id; in octeon_smp_setup()
    [all …]
|
/arch/s390/kernel/ |
D | sthyi.c |
    234  for (i = 0; i < block->hdr.cpus; i++) { in fill_diag_mac()
    235  switch (cpu_id(block->cpus[i].ctidx, diag224_buf)) { in fill_diag_mac()
    237  if (block->cpus[i].weight == DED_WEIGHT) in fill_diag_mac()
    243  if (block->cpus[i].weight == DED_WEIGHT) in fill_diag_mac()
    263  if (!(block->cpus[i].cflag & DIAG204_CPU_ONLINE)) in lpar_cpu_inf()
    266  switch (cpu_id(block->cpus[i].ctidx, diag224_buf)) { in lpar_cpu_inf()
    269  if (block->cpus[i].cur_weight < DED_WEIGHT) in lpar_cpu_inf()
    270  weight_cp |= block->cpus[i].cur_weight; in lpar_cpu_inf()
    274  if (block->cpus[i].cur_weight < DED_WEIGHT) in lpar_cpu_inf()
    275  weight_ifl |= block->cpus[i].cur_weight; in lpar_cpu_inf()
    [all …]
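The loop bound block->hdr.cpus counts the entries in a C99 flexible array member at the end of the block (the diag.h hits further down show both the __u8 cpus counters and the cpus[] arrays). A simplified sketch of that layout and walk; the field names and the online flag value here are stand-ins, not the real diag204 definitions:

    #include <linux/types.h>

    #define REC_ONLINE 0x01                 /* hypothetical flag bit */

    struct cpu_rec {                        /* simplified per-CPU record */
            __u8  ctidx;
            __u8  cflag;
            __u16 weight;
    };

    struct cpu_block {
            struct { __u8 cpus; } hdr;      /* number of valid entries */
            struct cpu_rec cpus[];          /* flexible array member */
    };

    static unsigned int count_online(const struct cpu_block *block)
    {
            unsigned int i, n = 0;

            for (i = 0; i < block->hdr.cpus; i++)
                    if (block->cpus[i].cflag & REC_ONLINE)
                            n++;
            return n;
    }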
|
/arch/arm/common/ |
D | mcpm_entry.c |
     36  mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; in __mcpm_cpu_going_down()
     37  sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_going_down()
     50  mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; in __mcpm_cpu_down()
     51  sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_down()
    103  sync_cache_r(&c->cpus); in __mcpm_outbound_enter_critical()
    111  cpustate = c->cpus[i].cpu; in __mcpm_outbound_enter_critical()
    116  sync_cache_r(&c->cpus[i].cpu); in __mcpm_outbound_enter_critical()
    439  mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN; in mcpm_sync_init()
    445  mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP; in mcpm_sync_init()
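Every state write here is paired with sync_cache_w(), and every read of another CPU's state with sync_cache_r(): MCPM coordinates CPUs whose caches may be off, so state must be pushed to and pulled from main memory explicitly. A sketch of the write side, assuming a hypothetical state[] array:

    #include <linux/threads.h>
    #include <asm/cacheflush.h>     /* sync_cache_w() on 32-bit ARM */

    static int state[NR_CPUS];      /* hypothetical published state */

    static void publish_state_sketch(int cpu, int new_state)
    {
            state[cpu] = new_state;
            /* Push the update out to memory so a CPU running with its
             * cache disabled observes it. */
            sync_cache_w(&state[cpu]);
    }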
|
/arch/ia64/kernel/ |
D | smp.c |
    294  cpumask_var_t cpus; in smp_flush_tlb_mm() local
    303  if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) { in smp_flush_tlb_mm()
    307  cpumask_copy(cpus, mm_cpumask(mm)); in smp_flush_tlb_mm()
    308  smp_call_function_many(cpus, in smp_flush_tlb_mm()
    310  free_cpumask_var(cpus); in smp_flush_tlb_mm()
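This is the standard cpumask_var_t lifecycle: allocate, snapshot the mask so it cannot change underneath you, fire the cross-calls, free. A minimal sketch of the same shape; do_flush() is a hypothetical IPI callback:

    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/mm_types.h>
    #include <linux/smp.h>

    static void do_flush(void *info) { }    /* hypothetical IPI callback */

    static void flush_mm_sketch(struct mm_struct *mm)
    {
            cpumask_var_t cpus;

            if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
                    return;                 /* the real code falls back here */

            cpumask_copy(cpus, mm_cpumask(mm));     /* stable snapshot */
            smp_call_function_many(cpus, do_flush, mm, 1);  /* wait = 1 */
            free_cpumask_var(cpus);
    }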
|
/arch/arm/kernel/ |
D | devtree.c |
     74  struct device_node *cpu, *cpus; in arm_dt_init_cpu_maps() local
     81  cpus = of_find_node_by_path("/cpus"); in arm_dt_init_cpu_maps()
     83  if (!cpus) in arm_dt_init_cpu_maps()
    149  set_smp_ops_by_method(cpus); in arm_dt_init_cpu_maps()
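The DT walk implied above is the usual one: look up /cpus, iterate its children, and drop the reference when done. A minimal sketch with error handling trimmed to the essentials:

    #include <linux/of.h>
    #include <linux/printk.h>

    static void walk_cpus_sketch(void)
    {
            struct device_node *cpus, *cpu;
            int n = 0;

            cpus = of_find_node_by_path("/cpus");
            if (!cpus)
                    return;

            for_each_child_of_node(cpus, cpu)
                    n++;            /* each child is (usually) one CPU node */

            pr_info("/cpus has %d children\n", n);
            of_node_put(cpus);      /* balance of_find_node_by_path() */
    }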
|
D | smp.c |
    801  struct cpumask *cpus = freq->policy->cpus; in cpufreq_callback() local
    802  int cpu, first = cpumask_first(cpus); in cpufreq_callback()
    809  for_each_cpu(cpu, cpus) { in cpufreq_callback()
    829  for_each_cpu(cpu, cpus) in cpufreq_callback()
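Here (and in the MIPS and Cell hits below) policy->cpus names every CPU affected by one frequency change, so the notifier loops over all of them. A sketch of that callback shape; scale_one() is a hypothetical per-CPU rescaling helper:

    #include <linux/cpufreq.h>
    #include <linux/cpumask.h>

    static void scale_one(int cpu, unsigned int old, unsigned int new)
    {
            /* hypothetical: rescale this CPU's time/delay constants */
    }

    static int cpufreq_cb_sketch(struct cpufreq_freqs *freq)
    {
            struct cpumask *cpus = freq->policy->cpus;
            int cpu;

            /* One policy may span several CPUs; touch each of them. */
            for_each_cpu(cpu, cpus)
                    scale_one(cpu, freq->old, freq->new);

            return 0;
    }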
|
/arch/mips/kernel/ |
D | time.c |
     40  struct cpumask *cpus = freq->policy->cpus; in cpufreq_callback() local
     73  for_each_cpu(cpu, cpus) { in cpufreq_callback()
|
/arch/arm64/kernel/ |
D | topology.c |
    196  static void amu_fie_setup(const struct cpumask *cpus) in amu_fie_setup() argument
    201  if (unlikely(cpumask_subset(cpus, amu_fie_cpus))) in amu_fie_setup()
    204  for_each_cpu(cpu, cpus) { in amu_fie_setup()
    212  cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus); in amu_fie_setup()
    217  cpumask_pr_args(cpus)); in amu_fie_setup()
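amu_fie_setup() grows a tracking mask monotonically: already-covered masks are skipped early (line 201), each new CPU is validated, and only then is the whole mask OR-ed in (line 212). A sketch of that pattern; tracked_mask and cpu_is_ok() are hypothetical:

    #include <linux/cpumask.h>

    static cpumask_t tracked_mask;          /* hypothetical tracking mask */

    static bool cpu_is_ok(int cpu)          /* hypothetical validity check */
    {
            return true;
    }

    static void track_cpus_sketch(const struct cpumask *cpus)
    {
            int cpu;

            if (cpumask_subset(cpus, &tracked_mask))
                    return;                 /* already covered, nothing new */

            for_each_cpu(cpu, cpus)
                    if (!cpu_is_ok(cpu))
                            return;         /* reject the whole mask */

            cpumask_or(&tracked_mask, &tracked_mask, cpus);
    }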
|
/arch/x86/kernel/ |
D | tsc_sync.c |
    357  int cpus = 2; in check_tsc_sync_source() local
    370  while (atomic_read(&start_count) != cpus - 1) in check_tsc_sync_source()
    380  while (atomic_read(&stop_count) != cpus-1) in check_tsc_sync_source()
    436  int cpus = 2; in check_tsc_sync_target() local
    463  while (atomic_read(&start_count) != cpus) in check_tsc_sync_target()
    481  while (atomic_read(&stop_count) != cpus) in check_tsc_sync_target()
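Here cpus is just the number of participants (always 2: the source CPU and the target being brought up), and the two sides rendezvous by spinning on shared atomic counters. A stripped-down sketch of that handshake:

    #include <linux/atomic.h>
    #include <linux/processor.h>    /* cpu_relax() */

    static atomic_t start_count = ATOMIC_INIT(0);

    static void rendezvous_sketch(int cpus)
    {
            atomic_inc(&start_count);       /* announce this CPU's arrival */

            /* Spin until all expected participants have checked in. */
            while (atomic_read(&start_count) != cpus)
                    cpu_relax();
    }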
|
/arch/powerpc/platforms/powermac/ |
D | smp.c |
    628  struct device_node *cpus; in smp_core99_pfunc_tb_freeze() local
    631  cpus = of_find_node_by_path("/cpus"); in smp_core99_pfunc_tb_freeze()
    632  BUG_ON(cpus == NULL); in smp_core99_pfunc_tb_freeze()
    635  pmf_call_function(cpus, "cpu-timebase", &args); in smp_core99_pfunc_tb_freeze()
    636  of_node_put(cpus); in smp_core99_pfunc_tb_freeze()
    706  struct device_node *cpus = in smp_core99_setup() local
    708  if (cpus && in smp_core99_setup()
    709  of_property_read_bool(cpus, "platform-cpu-timebase")) { in smp_core99_setup()
    714  of_node_put(cpus); in smp_core99_setup()
    763  struct device_node *cpus; in smp_core99_probe() local
    [all …]
|
/arch/s390/include/asm/ |
D | diag.h |
    147  __u8 cpus; member
    154  __u8 cpus; member
    206  __u8 cpus; member
    213  __u8 cpus; member
    240  struct diag204_x_cpu_info cpus[]; member
    245  struct diag204_x_phys_cpu cpus[]; member
|
/arch/arm64/boot/dts/apple/ |
D | t6001.dtsi |
     54  cpus = <&cpu_e00 &cpu_e01>;
     59  cpus = <&cpu_p00 &cpu_p01 &cpu_p02 &cpu_p03
|
/arch/ia64/mm/ |
D | discontig.c |
    115  unsigned long pernodesize = 0, cpus; in compute_pernodesize() local
    117  cpus = early_nr_cpus_node(node); in compute_pernodesize()
    118  pernodesize += PERCPU_PAGE_SIZE * cpus; in compute_pernodesize()
    263  int cpus = early_nr_cpus_node(node); in fill_pernode() local
    270  pernode += PERCPU_PAGE_SIZE * cpus; in fill_pernode()
|
/arch/powerpc/platforms/cell/ |
D | cpufreq_spudemand.c |
     94  for_each_cpu(i, policy->cpus) { in spu_gov_start()
    117  for_each_cpu (i, policy->cpus) { in spu_gov_stop()
|
/arch/powerpc/boot/dts/fsl/ |
D | mpc8536ds.dts |
     14  cpus {
     15  #cpus = <1>;
|
D | mpc8536ds_36b.dts |
     14  cpus {
     15  #cpus = <1>;
|
/arch/mips/boot/dts/xilfpga/ |
D | microAptiv.dtsi | 7 cpus {
|
/arch/mips/boot/dts/realtek/ |
D | rtl838x.dtsi | 4 cpus {
|
/arch/arm64/boot/dts/renesas/ |
D | r9a07g044l1.dtsi | 14 cpus {
|
D | r9a07g054l1.dtsi | 14 cpus {
|
/arch/arm64/boot/dts/broadcom/bcmbca/ |
D | bcm4906.dtsi | 6 cpus {
|