/arch/x86/hyperv/ |
D | mmu.c |
     20  static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
     55  static void hyperv_flush_tlb_others(const struct cpumask *cpus,    in hyperv_flush_tlb_others() argument
     64  trace_hyperv_mmu_flush_tlb_others(cpus, info);    in hyperv_flush_tlb_others()
     69  if (cpumask_empty(cpus))    in hyperv_flush_tlb_others()
     98  if (cpumask_equal(cpus, cpu_present_mask)) {    in hyperv_flush_tlb_others()
    112  if (hv_cpu_number_to_vp_number(cpumask_last(cpus)) >= 64)    in hyperv_flush_tlb_others()
    115  for_each_cpu(cpu, cpus) {    in hyperv_flush_tlb_others()
    153  status = hyperv_flush_tlb_others_ex(cpus, info);    in hyperv_flush_tlb_others()
    161  native_flush_tlb_others(cpus, info);    in hyperv_flush_tlb_others()
    164  static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,    in hyperv_flush_tlb_others_ex() argument
    [all …]
|
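The mmu.c hits above outline the shape of a remote-TLB-flush path: bail out when the mask is empty, special-case a mask that covers every present CPU, and otherwise walk only the requested CPUs. A minimal sketch of that cpumask screening pattern follows; the example_* names are hypothetical and this is not the kernel's actual implementation.

#include <linux/cpumask.h>

static void example_flush_one_cpu(int cpu)
{
        /* hypothetical per-CPU flush; stands in for building the real flush request */
}

static void example_flush_tlb_others(const struct cpumask *cpus)
{
        int cpu;

        if (cpumask_empty(cpus))                /* no target CPUs: nothing to do */
                return;

        if (cpumask_equal(cpus, cpu_present_mask)) {
                /* every present CPU is targeted: a broadcast "flush all" path fits here */
        }

        for_each_cpu(cpu, cpus)                 /* otherwise visit just the requested CPUs */
                example_flush_one_cpu(cpu);
}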
/arch/x86/include/asm/trace/ |
D | hyperv.h |
     12  TP_PROTO(const struct cpumask *cpus,
     14  TP_ARGS(cpus, info),
     21  TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
     60  TP_PROTO(const struct cpumask *cpus,
     62  TP_ARGS(cpus, vector),
     67  TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
|
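The hyperv.h hits are tracepoint definitions that record only the weight of the cpumask rather than the mask itself. A hedged skeleton of such a trace header, using a made-up TRACE_SYSTEM and event name, might look like the following; in a real build it would also be included once more from a .c file with CREATE_TRACE_POINTS defined.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM example

#if !defined(_TRACE_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EXAMPLE_H

#include <linux/cpumask.h>
#include <linux/tracepoint.h>

TRACE_EVENT(example_flush_cpus,
        TP_PROTO(const struct cpumask *cpus),
        TP_ARGS(cpus),
        TP_STRUCT__entry(
                __field(unsigned int, ncpus)    /* only the CPU count is stored */
        ),
        TP_fast_assign(
                __entry->ncpus = cpumask_weight(cpus);
        ),
        TP_printk("ncpus %u", __entry->ncpus)
);

#endif /* _TRACE_EXAMPLE_H */

/* must come last, after all TRACE_EVENT() definitions */
#include <trace/define_trace.h>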
/arch/mips/cavium-octeon/ |
D | smp.c |
    142  int cpus;    in octeon_smp_setup() local
    161  cpus = 1;    in octeon_smp_setup()
    164  set_cpu_possible(cpus, true);    in octeon_smp_setup()
    165  set_cpu_present(cpus, true);    in octeon_smp_setup()
    166  __cpu_number_map[id] = cpus;    in octeon_smp_setup()
    167  __cpu_logical_map[cpus] = id;    in octeon_smp_setup()
    168  cpus++;    in octeon_smp_setup()
    181  set_cpu_possible(cpus, true);    in octeon_smp_setup()
    182  __cpu_number_map[id] = cpus;    in octeon_smp_setup()
    183  __cpu_logical_map[cpus] = id;    in octeon_smp_setup()
    [all …]
|
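The octeon_smp_setup() hits enumerate hardware CPUs, mark each one possible and present, and record the hardware-id/logical-number mapping in both directions. A rough sketch of that bookkeeping, with hypothetical map names and assuming the hardware id stays below NR_CPUS:

#include <linux/cpumask.h>      /* set_cpu_possible(), set_cpu_present(), NR_CPUS */

/* hypothetical hardware-id <-> logical-CPU maps */
static int example_number_map[NR_CPUS];
static int example_logical_map[NR_CPUS];

static void example_register_cpu(int hwid, int *cpus)
{
        int cpu = (*cpus)++;            /* next free logical CPU number */

        set_cpu_possible(cpu, true);    /* CPU may be brought online later */
        set_cpu_present(cpu, true);     /* CPU is physically present now */
        example_number_map[hwid] = cpu;
        example_logical_map[cpu] = hwid;
}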
/arch/s390/kernel/ |
D | sthyi.c |
    234  for (i = 0; i < block->hdr.cpus; i++) {    in fill_diag_mac()
    235  switch (cpu_id(block->cpus[i].ctidx, diag224_buf)) {    in fill_diag_mac()
    237  if (block->cpus[i].weight == DED_WEIGHT)    in fill_diag_mac()
    243  if (block->cpus[i].weight == DED_WEIGHT)    in fill_diag_mac()
    263  if (!(block->cpus[i].cflag & DIAG204_CPU_ONLINE))    in lpar_cpu_inf()
    266  switch (cpu_id(block->cpus[i].ctidx, diag224_buf)) {    in lpar_cpu_inf()
    269  if (block->cpus[i].cur_weight < DED_WEIGHT)    in lpar_cpu_inf()
    270  weight_cp |= block->cpus[i].cur_weight;    in lpar_cpu_inf()
    274  if (block->cpus[i].cur_weight < DED_WEIGHT)    in lpar_cpu_inf()
    275  weight_ifl |= block->cpus[i].cur_weight;    in lpar_cpu_inf()
    [all …]
|
/arch/arm/common/ |
D | mcpm_entry.c |
     36  mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;    in __mcpm_cpu_going_down()
     37  sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);    in __mcpm_cpu_going_down()
     50  mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;    in __mcpm_cpu_down()
     51  sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);    in __mcpm_cpu_down()
    103  sync_cache_r(&c->cpus);    in __mcpm_outbound_enter_critical()
    111  cpustate = c->cpus[i].cpu;    in __mcpm_outbound_enter_critical()
    116  sync_cache_r(&c->cpus[i].cpu);    in __mcpm_outbound_enter_critical()
    439  mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;    in mcpm_sync_init()
    445  mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;    in mcpm_sync_init()
|
/arch/ia64/kernel/ |
D | smp.c |
    295  cpumask_var_t cpus;    in smp_flush_tlb_mm() local
    304  if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {    in smp_flush_tlb_mm()
    308  cpumask_copy(cpus, mm_cpumask(mm));    in smp_flush_tlb_mm()
    309  smp_call_function_many(cpus,    in smp_flush_tlb_mm()
    311  free_cpumask_var(cpus);    in smp_flush_tlb_mm()
|
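The ia64 smp_flush_tlb_mm() hits show the common cpumask_var_t pattern: allocate a temporary mask, copy mm_cpumask() into it, pass it to smp_call_function_many(), and free it, with a fallback when the atomic allocation fails. A self-contained sketch of that pattern; the example_* names and the callback body are hypothetical.

#include <linux/cpumask.h>
#include <linux/gfp.h>          /* GFP_ATOMIC */
#include <linux/mm_types.h>     /* struct mm_struct, mm_cpumask() */
#include <linux/smp.h>          /* smp_call_function(), smp_call_function_many() */

static void example_flush_fn(void *info)
{
        /* hypothetical per-CPU callback; would flush the local TLB for the mm in *info */
}

static void example_flush_mm(struct mm_struct *mm)
{
        cpumask_var_t cpus;

        if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
                /* allocation failed: fall back to interrupting every other CPU */
                smp_call_function(example_flush_fn, mm, 1);
                return;
        }

        cpumask_copy(cpus, mm_cpumask(mm));     /* snapshot the CPUs that ran this mm */
        smp_call_function_many(cpus, example_flush_fn, mm, 1);
        free_cpumask_var(cpus);
}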
/arch/s390/include/asm/ |
D | diag.h |
    131  __u8 cpus;    member
    138  __u8 cpus;    member
    190  __u8 cpus;    member
    197  __u8 cpus;    member
    224  struct diag204_x_cpu_info cpus[];    member
    229  struct diag204_x_phys_cpu cpus[];    member
|
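The diag.h hits are structure members: a byte-sized cpus count in a header plus flexible-array cpus[] members holding per-CPU records, which is what the sthyi.c loops above iterate over. A hedged illustration of that layout; every field name other than cpus is made up for the example.

#include <linux/types.h>

struct example_cpu_info {
        __u8  ctidx;            /* CPU-type index */
        __u8  cflag;            /* online/offline flags */
        __u16 cur_weight;
};

struct example_info_block {
        struct {
                __u8 cpus;                      /* number of entries in cpus[] below */
        } hdr;
        struct example_cpu_info cpus[];         /* flexible array member, hdr.cpus entries long */
};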
/arch/x86/kernel/ |
D | tsc_sync.c |
    312  int cpus = 2;    in check_tsc_sync_source() local
    334  while (atomic_read(&start_count) != cpus - 1) {    in check_tsc_sync_source()
    349  while (atomic_read(&stop_count) != cpus-1)    in check_tsc_sync_source()
    405  int cpus = 2;    in check_tsc_sync_target() local
    431  while (atomic_read(&start_count) != cpus)    in check_tsc_sync_target()
    449  while (atomic_read(&stop_count) != cpus)    in check_tsc_sync_target()
|
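In tsc_sync.c, cpus is simply the number of participants (2) in an atomic-counter rendezvous between the source CPU and the CPU being brought up. A hedged sketch of that handshake, with hypothetical function names:

#include <linux/atomic.h>
#include <linux/processor.h>    /* cpu_relax() */

static atomic_t start_count = ATOMIC_INIT(0);

static void example_sync_source(void)
{
        int cpus = 2;   /* exactly one source and one target take part */

        while (atomic_read(&start_count) != cpus - 1)   /* wait for the target to check in */
                cpu_relax();
        atomic_inc(&start_count);                       /* release the target */
        /* ... both CPUs now sample their clocks concurrently ... */
}

static void example_sync_target(void)
{
        int cpus = 2;

        atomic_inc(&start_count);                       /* tell the source we arrived */
        while (atomic_read(&start_count) != cpus)       /* wait for the source's release */
                cpu_relax();
}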
/arch/arm/kernel/ |
D | devtree.c |
     74  struct device_node *cpu, *cpus;    in arm_dt_init_cpu_maps() local
     81  cpus = of_find_node_by_path("/cpus");    in arm_dt_init_cpu_maps()
     83  if (!cpus)    in arm_dt_init_cpu_maps()
    167  set_smp_ops_by_method(cpus);    in arm_dt_init_cpu_maps()
|
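The devtree.c hits look up the /cpus device-tree node by path before building the CPU maps. A short sketch of that walk; the example_* function and the loop body are hypothetical, the of_* calls are the standard device-tree API.

#include <linux/of.h>

static void example_scan_cpus(void)
{
        struct device_node *cpus, *cpu;

        cpus = of_find_node_by_path("/cpus");
        if (!cpus)
                return;                 /* no /cpus node in this device tree */

        for_each_child_of_node(cpus, cpu) {
                /* e.g. read the "reg" property of each child to get its hardware id */
        }

        of_node_put(cpus);              /* balance of_find_node_by_path() */
}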
D | smp.c |
    761  struct cpumask *cpus = freq->policy->cpus;    in cpufreq_callback() local
    762  int cpu, first = cpumask_first(cpus);    in cpufreq_callback()
    769  for_each_cpu(cpu, cpus) {    in cpufreq_callback()
    789  for_each_cpu(cpu, cpus)    in cpufreq_callback()
|
/arch/powerpc/kernel/ |
D | rtas.c |
    852  cpumask_var_t cpus)    in rtas_cpu_state_change_mask() argument
    854  if (!cpumask_empty(cpus)) {    in rtas_cpu_state_change_mask()
    855  cpumask_clear(cpus);    in rtas_cpu_state_change_mask()
    865  cpumask_var_t cpus)    in rtas_cpu_state_change_mask() argument
    871  if (cpumask_empty(cpus))    in rtas_cpu_state_change_mask()
    874  for_each_cpu(cpu, cpus) {    in rtas_cpu_state_change_mask()
    894  cpumask_shift_right(cpus, cpus, cpu);    in rtas_cpu_state_change_mask()
    895  cpumask_shift_left(cpus, cpus, cpu);    in rtas_cpu_state_change_mask()
    899  cpumask_clear_cpu(cpu, cpus);    in rtas_cpu_state_change_mask()
    909  int rtas_online_cpus_mask(cpumask_var_t cpus)    in rtas_online_cpus_mask() argument
    [all …]
|
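The rtas.c hits include a cpumask_shift_right()/cpumask_shift_left() pair: shifting the mask right and then back left by the same CPU number clears every bit below that CPU, so an error path can report which CPUs were never reached. A minimal illustration of that trick (the example_* wrapper is hypothetical):

#include <linux/cpumask.h>

static void example_clear_cpus_below(struct cpumask *cpus, int cpu)
{
        /* zero bits 0 .. cpu-1, keeping only "cpu" and everything above it */
        cpumask_shift_right(cpus, cpus, cpu);
        cpumask_shift_left(cpus, cpus, cpu);
}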
/arch/powerpc/platforms/powermac/ |
D | smp.c |
    632  struct device_node *cpus;    in smp_core99_pfunc_tb_freeze() local
    635  cpus = of_find_node_by_path("/cpus");    in smp_core99_pfunc_tb_freeze()
    636  BUG_ON(cpus == NULL);    in smp_core99_pfunc_tb_freeze()
    639  pmf_call_function(cpus, "cpu-timebase", &args);    in smp_core99_pfunc_tb_freeze()
    640  of_node_put(cpus);    in smp_core99_pfunc_tb_freeze()
    710  struct device_node *cpus =    in smp_core99_setup() local
    712  if (cpus &&    in smp_core99_setup()
    713  of_get_property(cpus, "platform-cpu-timebase", NULL)) {    in smp_core99_setup()
    766  struct device_node *cpus;    in smp_core99_probe() local
    772  for_each_node_by_type(cpus, "cpu")    in smp_core99_probe()
|
/arch/arc/boot/dts/ |
D | eznps.dts |
     13  present-cpus = "0-1,16-17";
     14  possible-cpus = "0-4095";
|
/arch/arm/boot/dts/ |
D | bcm2836.dtsi | 37 cpus: cpus { label
|
D | bcm2837.dtsi | 36 cpus: cpus { label
|
D | bcm2835.dtsi | 7 cpus {
|
D | bcm47081.dtsi | 22 cpus {
|
D | axm5516-amarillo.dts | 13 #include "axm5516-cpus.dtsi"
|
/arch/mips/boot/dts/xilfpga/ |
D | microAptiv.dtsi | 7 cpus {
|
/arch/powerpc/boot/dts/fsl/ |
D | mpc8536ds.dts |
     14  cpus {
     15  #cpus = <1>;
|
D | mpc8536ds_36b.dts |
     14  cpus {
     15  #cpus = <1>;
|
/arch/powerpc/platforms/cell/ |
D | cpufreq_spudemand.c |
     94  for_each_cpu(i, policy->cpus) {    in spu_gov_start()
    117  for_each_cpu (i, policy->cpus) {    in spu_gov_stop()
|
/arch/ia64/mm/ |
D | discontig.c |
    115  unsigned long pernodesize = 0, cpus;    in compute_pernodesize() local
    117  cpus = early_nr_cpus_node(node);    in compute_pernodesize()
    118  pernodesize += PERCPU_PAGE_SIZE * cpus;    in compute_pernodesize()
    263  int cpus = early_nr_cpus_node(node);    in fill_pernode() local
    270  pernode += PERCPU_PAGE_SIZE * cpus;    in fill_pernode()
|
/arch/arm64/boot/dts/marvell/ |
D | armada-372x.dtsi | 18 cpus {
|
D | armada-ap806-dual.dtsi | 14 cpus {
|