/arch/arm/boot/dts/intel/axm/
D | axm5516-cpus.dtsi
    13  cpu-map {
    16          cpu = <&CPU0>;
    19          cpu = <&CPU1>;
    22          cpu = <&CPU2>;
    25          cpu = <&CPU3>;
    30          cpu = <&CPU4>;
    33          cpu = <&CPU5>;
    36          cpu = <&CPU6>;
    39          cpu = <&CPU7>;
    44          cpu = <&CPU8>;
        [all …]
/arch/arm/mach-tegra/
D | platsmp.c
    36  static void tegra_secondary_init(unsigned int cpu)
    38          cpumask_set_cpu(cpu, &tegra_cpu_init_mask);
    42  static int tegra20_boot_secondary(unsigned int cpu, struct task_struct *idle)
    44          cpu = cpu_logical_map(cpu);
    54          tegra_put_cpu_in_reset(cpu);
    62          flowctrl_write_cpu_halt(cpu, 0);
    64          tegra_enable_cpu_clock(cpu);
    65          flowctrl_write_cpu_csr(cpu, 0);  /* Clear flow controller CSR. */
    66          tegra_cpu_out_of_reset(cpu);
    70  static int tegra30_boot_secondary(unsigned int cpu, struct task_struct *idle)
        [all …]
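The Tegra excerpt shows the common ordering for releasing a secondary core: hold it in reset, clear its halt and flow-controller state, enable its clock, then take it out of reset. A minimal standalone C sketch of that ordering; reg_write() is a hypothetical stand-in for the Tegra MMIO helpers, not a real API:

    /* Hypothetical sketch of the reset-release ordering above; the
     * reg_write() helper is a stand-in, not a real Tegra API. */
    #include <stdio.h>

    static void reg_write(const char *reg, unsigned int val)
    {
            printf("%s <- %#x\n", reg, val);        /* models an MMIO write */
    }

    static int boot_secondary(unsigned int cpu)
    {
            reg_write("CPU_RESET_SET", 1u << cpu);  /* hold the core in reset */
            reg_write("FLOW_HALT", 0);              /* clear any pending halt event */
            reg_write("CPU_CLK_ENABLE", 1u << cpu); /* ungate the core's clock */
            reg_write("FLOW_CSR", 0);               /* clear flow-controller state */
            reg_write("CPU_RESET_CLR", 1u << cpu);  /* release: core runs from the boot vector */
            return 0;
    }

    int main(void) { return boot_secondary(1); }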
/arch/x86/include/asm/
D | topology.h
    48  extern int __cpu_to_node(int cpu);
    51  extern int early_cpu_to_node(int cpu);
    56  static inline int early_cpu_to_node(int cpu)
    58          return early_per_cpu(x86_cpu_to_node_map, cpu);
    94  static inline int early_cpu_to_node(int cpu)
   105  extern const struct cpumask *cpu_coregroup_mask(int cpu);
   106  extern const struct cpumask *cpu_clustergroup_mask(int cpu);
   108  #define topology_logical_package_id(cpu)  (cpu_data(cpu).logical_proc_id)
   109  #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
   110  #define topology_logical_die_id(cpu)      (cpu_data(cpu).logical_die_id)
        [all …]
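These topology_*_id() helpers are thin macros over a per-CPU data array. A self-contained sketch of the same pattern, with an invented cpuinfo layout and made-up id values:

    /* Standalone model of the macro-over-per-CPU-data pattern above;
     * the struct layout and values are illustrative, not the kernel's. */
    #include <stdio.h>

    struct cpuinfo { int logical_proc_id, phys_proc_id, logical_die_id; };

    static struct cpuinfo cpu_data[4] = {
            {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, {1, 1, 1},
    };

    #define topology_logical_package_id(cpu)  (cpu_data[cpu].logical_proc_id)
    #define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)

    int main(void)
    {
            for (int cpu = 0; cpu < 4; cpu++)
                    printf("cpu%d: package %d (physical %d)\n", cpu,
                           topology_logical_package_id(cpu),
                           topology_physical_package_id(cpu));
            return 0;
    }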
/arch/microblaze/kernel/cpu/
D | cpuinfo-static.c
    23  void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
    28          (fcpu(cpu, "xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) |
    29          (fcpu(cpu, "xlnx,use-msr-instr") ? PVR2_USE_MSR_INSTR : 0) |
    30          (fcpu(cpu, "xlnx,use-pcmp-instr") ? PVR2_USE_PCMP_INSTR : 0) |
    31          (fcpu(cpu, "xlnx,use-div") ? PVR0_USE_DIV_MASK : 0);
    43          ci->use_mult = fcpu(cpu, "xlnx,use-hw-mul");
    51          ci->use_fpu = fcpu(cpu, "xlnx,use-fpu");
    59          (fcpu(cpu, "xlnx,unaligned-exceptions") ?
    61          (fcpu(cpu, "xlnx,ill-opcode-exception") ?
    63          (fcpu(cpu, "xlnx,iopb-bus-exception") ?
        [all …]
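set_cpuinfo_static() builds a PVR-style feature word by OR-ing one mask per boolean device-tree property. A toy version of that shape, where fcpu() is a table lookup rather than the kernel's device-tree helper:

    /* Sketch of building a feature word from boolean DT properties, as
     * the MicroBlaze code does; fcpu() here is a toy lookup. */
    #include <stdio.h>
    #include <string.h>

    #define PVR0_USE_BARREL_MASK 0x1
    #define PVR0_USE_DIV_MASK    0x2

    static const char *present[] = { "xlnx,use-barrel", "xlnx,use-div" };

    static int fcpu(const char *prop)       /* 1 if the DT property exists */
    {
            for (unsigned i = 0; i < sizeof(present) / sizeof(present[0]); i++)
                    if (!strcmp(present[i], prop))
                            return 1;
            return 0;
    }

    int main(void)
    {
            unsigned int pvr0 =
                    (fcpu("xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) |
                    (fcpu("xlnx,use-div")    ? PVR0_USE_DIV_MASK    : 0);
            printf("pvr0 = %#x\n", pvr0);
            return 0;
    }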
/arch/powerpc/include/asm/
D | smp.h
    34  extern int cpu_to_chip_id(int cpu);
    44          void (*message_pass)(int cpu, int msg);
    46          void (*cause_ipi)(int cpu);
    48          int (*cause_nmi_ipi)(int cpu);
    67  extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
    68  extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
    78  void generic_cpu_die(unsigned int cpu);
    79  void generic_set_cpu_dead(unsigned int cpu);
    80  void generic_set_cpu_up(unsigned int cpu);
    81  int generic_check_cpu_restart(unsigned int cpu);
        [all …]
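These function pointers live in an ops table with optional hooks: callers test a pointer before using it and fall back otherwise, as do_message_pass() in smp.c below shows. A minimal model of that dispatch, with invented names:

    /* Minimal model of the optional-hook ops table; names are
     * illustrative, not the kernel's smp_ops. */
    #include <stdio.h>

    struct smp_ops_t {
            void (*message_pass)(int cpu, int msg); /* may be NULL */
            void (*cause_ipi)(int cpu);
    };

    static void mux_ipi(int cpu) { printf("doorbell -> cpu%d\n", cpu); }

    static struct smp_ops_t ops = { .message_pass = NULL, .cause_ipi = mux_ipi };

    static void do_message_pass(int cpu, int msg)
    {
            if (ops.message_pass)   /* controller can deliver typed messages */
                    ops.message_pass(cpu, msg);
            else                    /* otherwise fall back to a muxed doorbell */
                    ops.cause_ipi(cpu);
    }

    int main(void) { do_message_pass(2, 1); return 0; }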
D | topology.h
    48  static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
    50          numa_cpu_lookup_table[cpu] = node;
    53  static inline int early_cpu_to_node(int cpu)
    57          nid = numa_cpu_lookup_table[cpu];
    69  extern void map_cpu_to_node(int cpu, int node);
    71  extern void unmap_cpu_from_node(unsigned long cpu);
    76  static inline int early_cpu_to_node(int cpu) { return 0; }
    90  static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
   105  static inline void map_cpu_to_node(int cpu, int node) {}
   107  static inline void unmap_cpu_from_node(unsigned long cpu) {}
        [all …]
/arch/powerpc/kernel/
D | tau_6xx.c
    55  static void set_thresholds(unsigned long cpu)
    60          mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID);
    63          mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie);
    66  static void TAUupdate(int cpu)
    77          if (tau[cpu].low >= step_size) {
    78                  tau[cpu].low -= step_size;
    79                  tau[cpu].high -= (step_size - window_expand);
    81                  tau[cpu].grew = 1;
    88          if (tau[cpu].high <= 127 - step_size) {
    89                  tau[cpu].low += (step_size - window_expand);
        [all …]
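TAUupdate() maintains a [low, high] threshold window that slides toward the measured temperature and widens by window_expand each step, so thermal interrupts become rare once the temperature settles. A toy model with illustrative constants (the rising branch's high-side increment is inferred as the mirror of the falling branch shown above):

    /* Toy model of the TAU sliding threshold window; constants and the
     * high-side increment on the rising branch are assumptions. */
    #include <stdio.h>

    struct tau_temp { int low, high; };

    static void tau_update(struct tau_temp *t, int below)
    {
            const int step_size = 2, window_expand = 1;

            if (below && t->low >= step_size) {             /* temp fell below the window */
                    t->low  -= step_size;
                    t->high -= step_size - window_expand;
            } else if (!below && t->high <= 127 - step_size) { /* temp rose above it */
                    t->low  += step_size - window_expand;
                    t->high += step_size;
            }
    }

    int main(void)
    {
            struct tau_temp t = { 40, 44 };
            tau_update(&t, 0);                      /* temperature rose */
            printf("window now [%d, %d]\n", t.low, t.high);
            return 0;
    }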
D | smp.c
   286  void smp_muxed_ipi_set_message(int cpu, int msg)
   288          struct cpu_messages *info = &per_cpu(ipi_message, cpu);
   298  void smp_muxed_ipi_message_pass(int cpu, int msg)
   300          smp_muxed_ipi_set_message(cpu, msg);
   306          smp_ops->cause_ipi(cpu);
   360  static inline void do_message_pass(int cpu, int msg)
   363          smp_ops->message_pass(cpu, msg);
   366          smp_muxed_ipi_message_pass(cpu, msg);
   370  void arch_smp_send_reschedule(int cpu)
   373          do_message_pass(cpu, PPC_MSG_RESCHEDULE);
        [all …]
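The muxed-IPI path packs message types into one per-CPU word: senders atomically set a bit and ring a single doorbell; the receiver swaps the word to zero and handles every bit it finds, so many messages cost one interrupt. A userspace sketch using C11 atomics in place of the kernel's primitives:

    /* Userspace sketch of IPI muxing; not the kernel implementation. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define MSG_RESCHEDULE 0
    #define NR_CPUS 4

    static _Atomic unsigned long ipi_message[NR_CPUS];

    static void muxed_ipi_set_message(int cpu, int msg)
    {
            /* release store: the message is visible before the doorbell rings */
            atomic_fetch_or_explicit(&ipi_message[cpu], 1UL << msg,
                                     memory_order_release);
    }

    static unsigned long muxed_ipi_receive(int cpu)
    {
            /* one atomic swap drains every message set since the last IPI */
            return atomic_exchange_explicit(&ipi_message[cpu], 0,
                                            memory_order_acquire);
    }

    int main(void)
    {
            muxed_ipi_set_message(1, MSG_RESCHEDULE);
            printf("cpu1 pending: %#lx\n", muxed_ipi_receive(1));
            return 0;
    }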
D | watchdog.c
   148          int cpu = raw_smp_processor_id();
   151          pr_emerg("CPU %d Hard LOCKUP\n", cpu);
   153                  cpu, tb, per_cpu(wd_timer_tb, cpu),
   154                  tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
   182  static bool set_cpu_stuck(int cpu)
   184          cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
   185          cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
   200  static void watchdog_smp_panic(int cpu)
   213          if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending))
   220                  if (c == cpu)
        [all …]
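The SMP watchdog keeps a per-CPU heartbeat timestamp plus pending and stuck masks; a CPU whose timestamp goes stale is reported once and moved to the stuck mask so it is not re-reported. A standalone sketch with plain integers standing in for the timebase and cpumasks:

    /* Sketch of the watchdog bookkeeping; timestamps and masks are toy
     * integers, not the kernel's timebase or cpumask_t. */
    #include <stdio.h>

    #define NR_CPUS 4

    static unsigned long wd_timer_tb[NR_CPUS];      /* last heartbeat per CPU */
    static unsigned long pending = 0xf, stuck;      /* one bit per CPU */

    static void check_cpus(unsigned long now, unsigned long timeout)
    {
            for (int c = 0; c < NR_CPUS; c++) {
                    if (!(pending & (1UL << c)))
                            continue;
                    if (now - wd_timer_tb[c] > timeout) {
                            stuck   |=  1UL << c;   /* mark hard-locked */
                            pending &= ~(1UL << c); /* stop re-reporting it */
                            printf("CPU %d Hard LOCKUP\n", c);
                    }
            }
    }

    int main(void)
    {
            for (int c = 0; c < NR_CPUS; c++)
                    wd_timer_tb[c] = (c == 2) ? 0 : 100; /* cpu2 never checked in */
            check_cpus(200, 50);
            return 0;
    }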
/arch/s390/include/asm/
D | topology.h
     9  struct cpu;
    29  #define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
    30  #define topology_thread_id(cpu)           (cpu_topology[cpu].thread_id)
    31  #define topology_sibling_cpumask(cpu)     (&cpu_topology[cpu].thread_mask)
    32  #define topology_core_id(cpu)             (cpu_topology[cpu].core_id)
    33  #define topology_core_cpumask(cpu)        (&cpu_topology[cpu].core_mask)
    34  #define topology_book_id(cpu)             (cpu_topology[cpu].book_id)
    35  #define topology_book_cpumask(cpu)        (&cpu_topology[cpu].book_mask)
    36  #define topology_drawer_id(cpu)           (cpu_topology[cpu].drawer_id)
    37  #define topology_drawer_cpumask(cpu)      (&cpu_topology[cpu].drawer_mask)
        [all …]
/arch/arm/mach-meson/
D | platsmp.c
    38  static struct reset_control *meson_smp_get_core_reset(int cpu)
    40          struct device_node *np = of_get_cpu_node(cpu, 0);
    45  static void meson_smp_set_cpu_ctrl(int cpu, bool on_off)
    50                  val |= BIT(cpu);
    52                  val &= ~BIT(cpu);
   116  static void meson_smp_begin_secondary_boot(unsigned int cpu)
   125                 sram_base + MESON_SMP_SRAM_CPU_CTRL_ADDR_REG(cpu));
   131          scu_cpu_power_enable(scu_base, cpu);
   134  static int meson_smp_finalize_secondary_boot(unsigned int cpu)
   139          while (readl(sram_base + MESON_SMP_SRAM_CPU_CTRL_ADDR_REG(cpu))) {
        [all …]
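Meson's secondary bring-up is a mailbox handshake: the boot CPU writes the entry address into a per-CPU SRAM slot, powers the core up, then polls until the secondary zeroes the slot to acknowledge. A sketch of the handshake with an atomic variable standing in for the SRAM register:

    /* Model of the SRAM mailbox handshake; shared atomics stand in for
     * the real SRAM slots and readl()/writel(). */
    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned long sram_slot[4];

    static void begin_secondary_boot(int cpu, unsigned long entry)
    {
            atomic_store(&sram_slot[cpu], entry);   /* post the entry point */
    }

    static int finalize_secondary_boot(int cpu, int spins)
    {
            while (atomic_load(&sram_slot[cpu])) {  /* secondary clears on entry */
                    if (--spins == 0)
                            return -1;              /* timed out */
            }
            return 0;
    }

    int main(void)
    {
            begin_secondary_boot(1, 0x80008000UL);
            atomic_store(&sram_slot[1], 0);         /* pretend the secondary ran */
            printf("boot %s\n", finalize_secondary_boot(1, 10) ? "failed" : "ok");
            return 0;
    }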
/arch/arm/mach-bcm/
D | platsmp-brcmstb.c
    59  static int per_cpu_sw_state_rd(u32 cpu)
    61          sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
    62          return per_cpu(per_cpu_sw_state, cpu);
    65  static void per_cpu_sw_state_wr(u32 cpu, int val)
    68          per_cpu(per_cpu_sw_state, cpu) = val;
    69          sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
    72  static inline void per_cpu_sw_state_wr(u32 cpu, int val) { }
    75  static void __iomem *pwr_ctrl_get_base(u32 cpu)
    78          base += (cpu_logical_map(cpu) * 4);
    82  static u32 pwr_ctrl_rd(u32 cpu)
        [all …]
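per_cpu_sw_state_rd()/wr() bracket a plain per-CPU variable with explicit cache maintenance so a core whose caches are still off sees the latest value. A sketch of the same publish/observe shape using C11 release/acquire ordering in place of sync_cache_w()/sync_cache_r():

    /* Sketch of the published per-CPU state pattern; C11 atomics stand
     * in for the ARM cache-maintenance calls. */
    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int per_cpu_sw_state[4];

    static void state_wr(int cpu, int val)
    {
            atomic_store_explicit(&per_cpu_sw_state[cpu], val,
                                  memory_order_release); /* like sync_cache_w() */
    }

    static int state_rd(int cpu)
    {
            return atomic_load_explicit(&per_cpu_sw_state[cpu],
                                        memory_order_acquire); /* like sync_cache_r() */
    }

    int main(void)
    {
            state_wr(1, 1);
            printf("cpu1 sw state: %d\n", state_rd(1));
            return 0;
    }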
/arch/x86/xen/
D | smp.c
    33  void xen_smp_intr_free(unsigned int cpu)
    35          kfree(per_cpu(xen_resched_irq, cpu).name);
    36          per_cpu(xen_resched_irq, cpu).name = NULL;
    37          if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
    38                  unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
    39                  per_cpu(xen_resched_irq, cpu).irq = -1;
    41          kfree(per_cpu(xen_callfunc_irq, cpu).name);
    42          per_cpu(xen_callfunc_irq, cpu).name = NULL;
    43          if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
    44                  unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
        [all …]
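xen_smp_intr_free() is written to be idempotent: free and NULL the name, unbind only while the irq is valid, then mark it -1, so running the teardown twice is harmless. A generic sketch of that release shape:

    /* Generic sketch of idempotent per-CPU IRQ teardown; the slot type
     * and unbind step are illustrative. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct irq_slot { char *name; int irq; };

    static void slot_free(struct irq_slot *s)
    {
            free(s->name);
            s->name = NULL;                 /* a second free is now a no-op */
            if (s->irq >= 0) {
                    printf("unbind irq %d\n", s->irq);
                    s->irq = -1;            /* a second unbind is now a no-op */
            }
    }

    int main(void)
    {
            struct irq_slot s = { strdup("resched1"), 17 };
            slot_free(&s);
            slot_free(&s);                  /* safe to call again */
            return 0;
    }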
D | smp_pv.c
    61          int cpu;
    74          cpu = smp_processor_id();
    75          smp_store_cpu_info(cpu);
    76          cpu_data(cpu).x86_max_cores = 1;
    77          set_cpu_sibling_map(cpu);
    83          notify_cpu_starting(cpu);
    85          set_cpu_online(cpu, true);
    99  void xen_smp_intr_free_pv(unsigned int cpu)
   101          kfree(per_cpu(xen_irq_work, cpu).name);
   102          per_cpu(xen_irq_work, cpu).name = NULL;
        [all …]
/arch/loongarch/kernel/
D | smp.c
    81          unsigned int cpu, i;
    85          for_each_online_cpu(cpu)
    86                  seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
    91  static inline void set_cpu_core_map(int cpu)
    95          cpumask_set_cpu(cpu, &cpu_core_setup_map);
    98                  if (cpu_data[cpu].package == cpu_data[i].package) {
    99                          cpumask_set_cpu(i, &cpu_core_map[cpu]);
   100                          cpumask_set_cpu(cpu, &cpu_core_map[i]);
   105  static inline void set_cpu_sibling_map(int cpu)
   109          cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
        [all …]
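set_cpu_core_map() links each newly set-up CPU with every earlier CPU in the same package, setting both directions so the masks stay symmetric. A standalone model with invented package ids and bitmasks in place of cpumasks:

    /* Model of set_cpu_core_map(); package ids and masks are toy values. */
    #include <stdio.h>

    #define NR_CPUS 4

    static int package[NR_CPUS] = { 0, 0, 1, 1 };
    static unsigned long core_map[NR_CPUS];     /* bitmask of same-package CPUs */
    static unsigned long setup_map;

    static void set_cpu_core_map(int cpu)
    {
            setup_map |= 1UL << cpu;
            for (int i = 0; i < NR_CPUS; i++) {
                    if (!(setup_map & (1UL << i)))
                            continue;           /* only CPUs already set up */
                    if (package[cpu] == package[i]) {
                            core_map[cpu] |= 1UL << i;   /* link both ways so */
                            core_map[i]   |= 1UL << cpu; /* masks stay symmetric */
                    }
            }
    }

    int main(void)
    {
            for (int c = 0; c < NR_CPUS; c++)
                    set_cpu_core_map(c);
            printf("cpu3 core map: %#lx\n", core_map[3]); /* 0xc: cpus 2 and 3 */
            return 0;
    }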
/arch/loongarch/include/asm/
D | mmu_context.h
    24  static inline u64 asid_version_mask(unsigned int cpu)
    26          return ~(u64)(cpu_asid_mask(&cpu_data[cpu]));
    29  static inline u64 asid_first_version(unsigned int cpu)
    31          return cpu_asid_mask(&cpu_data[cpu]) + 1;
    34  #define cpu_context(cpu, mm)  ((mm)->context.asid[cpu])
    35  #define asid_cache(cpu)       (cpu_data[cpu].asid_cache)
    36  #define cpu_asid(cpu, mm)     (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
    38  static inline int asid_valid(struct mm_struct *mm, unsigned int cpu)
    40          if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu))
    52  get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
        [all …]
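The context value here is 64 bits whose low bits are the hardware ASID and whose high bits are a generation counter, so asid_valid() is a single XOR-and-mask against the per-CPU cache. A standalone sketch assuming an 8-bit ASID field:

    /* Standalone model of version-tagged ASIDs; the 8-bit ASID width is
     * an assumption for illustration. */
    #include <stdint.h>
    #include <stdio.h>

    #define ASID_MASK       0xffULL         /* assumed hardware ASID bits */
    #define VERSION_MASK    (~ASID_MASK)    /* high bits: generation counter */

    static uint64_t asid_cache = 0x105;     /* per-CPU: last ASID handed out */

    static int asid_valid(uint64_t mm_context)
    {
            /* same generation in the high bits means the ASID is still usable */
            return !((mm_context ^ asid_cache) & VERSION_MASK);
    }

    int main(void)
    {
            uint64_t ctx = 0x103;                   /* generation 1, ASID 3 */
            printf("valid: %d\n", asid_valid(ctx)); /* 1: same generation */
            asid_cache = 0x201;                     /* generation rolled over */
            printf("valid: %d\n", asid_valid(ctx)); /* 0: must get a new ASID */
            return 0;
    }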
/arch/xtensa/include/asm/
D | mmu_context.h
    35  #define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
    70  static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
    72          unsigned long asid = cpu_asid_cache(cpu);
    81          cpu_asid_cache(cpu) = asid;
    82          mm->context.asid[cpu] = asid;
    83          mm->context.cpu = cpu;
    86  static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
    93                  unsigned long asid = mm->context.asid[cpu];
    96                      ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
    97                          get_new_mmu_context(mm, cpu);
        [all …]
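get_new_mmu_context() is the allocation half of the same scheme: bump the per-CPU counter, and when the ASID bits wrap, start a new generation (the real code flushes the TLB at that point, since every previously issued ASID becomes stale). A toy version continuing the previous sketch's assumed layout:

    /* Allocation half of the version-tagged ASID scheme; 8-bit ASID
     * width and the reserved-zero slot are assumptions. */
    #include <inttypes.h>
    #include <stdio.h>

    #define ASID_MASK 0xffULL

    static uint64_t asid_cache = 0x1fe;     /* near the end of a generation */

    static uint64_t get_new_context(void)
    {
            uint64_t asid = ++asid_cache;

            if (!(asid & ASID_MASK)) {      /* ASID bits wrapped to zero */
                    /* new generation: all earlier ASIDs are stale; the
                     * kernel flushes the TLB here */
                    asid += 1;              /* skip the reserved ASID 0 slot */
                    asid_cache = asid;
                    printf("rollover: flush TLB\n");
            }
            return asid;
    }

    int main(void)
    {
            printf("asid %#" PRIx64 "\n", get_new_context()); /* 0x1ff */
            printf("asid %#" PRIx64 "\n", get_new_context()); /* 0x201 after rollover */
            return 0;
    }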
/arch/ia64/kernel/
D | err_inject.c
    61          u32 cpu=dev->id;                                        \
    62          return sprintf(buf, "%llx\n", name[cpu]);               \
    70          unsigned int cpu=dev->id;                               \
    71          name[cpu] = simple_strtoull(buf, NULL, 16);             \
    84          unsigned int cpu=dev->id;
    88          printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
    89          printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]);
    90          printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]);
    92                  err_data_buffer[cpu].data1,
    93                  err_data_buffer[cpu].data2,
        [all …]
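err_inject.c stamps out its per-CPU sysfs accessors with a macro: one formatted show and one strtoull-parsing store per attribute. A userspace sketch of that macro pattern, with plain arrays and functions in place of the device-attribute plumbing:

    /* Sketch of macro-generated per-CPU accessors; the sysfs wiring is
     * replaced by plain functions. */
    #include <stdio.h>
    #include <stdlib.h>

    #define NR_CPUS 4

    #define define_one_rw(name)                                         \
    static unsigned long long name[NR_CPUS];                            \
    static int show_##name(int cpu, char *buf)                          \
    {                                                                   \
            return sprintf(buf, "%llx\n", name[cpu]);                   \
    }                                                                   \
    static void store_##name(int cpu, const char *buf)                  \
    {                                                                   \
            name[cpu] = strtoull(buf, NULL, 16);                        \
    }

    define_one_rw(err_type_info)

    int main(void)
    {
            char buf[32];
            store_err_type_info(0, "deadbeef");
            show_err_type_info(0, buf);
            printf("cpu0 err_type_info = %s", buf);
            return 0;
    }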
/arch/arm64/kernel/
D | smp.c
    88  static void ipi_setup(int cpu);
    91  static void ipi_teardown(int cpu);
    92  static int op_cpu_kill(unsigned int cpu);
    94  static inline int op_cpu_kill(unsigned int cpu)
   105  static int boot_secondary(unsigned int cpu, struct task_struct *idle)
   107          const struct cpu_operations *ops = get_cpu_ops(cpu);
   110          return ops->cpu_boot(cpu);
   117  int __cpu_up(unsigned int cpu, struct task_struct *idle)
   130          ret = boot_secondary(cpu, idle);
   132                  pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
        [all …]
D | topology.c
    28  static bool __init acpi_cpu_is_threaded(int cpu)
    30          int is_threaded = acpi_pptt_cpu_is_thread(cpu);
    48          int cpu, topology_id;
    53          for_each_possible_cpu(cpu) {
    54                  topology_id = find_acpi_cpu_topology(cpu, 0);
    58                  if (acpi_cpu_is_threaded(cpu)) {
    59                          cpu_topology[cpu].thread_id = topology_id;
    60                          topology_id = find_acpi_cpu_topology(cpu, 1);
    61                          cpu_topology[cpu].core_id = topology_id;
    63                          cpu_topology[cpu].thread_id = -1;
        [all …]
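parse_acpi_topology() queries the PPTT at level 0; if the CPU is a hardware thread, that id becomes the thread id and level 1 supplies the core id, otherwise thread_id is set to -1. A sketch with a toy two-threads-per-core lookup standing in for find_acpi_cpu_topology():

    /* Sketch of the PPTT topology walk; the lookup function is a toy
     * stand-in, not the ACPI helper. */
    #include <stdio.h>

    struct topo { int thread_id, core_id; };

    static int find_topology(int cpu, int level)
    {
            return level == 0 ? cpu : cpu / 2;  /* assume two threads per core */
    }

    static void parse_topology(struct topo *t, int cpu, int threaded)
    {
            int id = find_topology(cpu, 0);

            if (threaded) {
                    t->thread_id = id;              /* level 0 is the thread */
                    t->core_id = find_topology(cpu, 1);
            } else {
                    t->thread_id = -1;              /* no SMT: no thread level */
                    t->core_id = id;                /* level 0 is already the core */
            }
    }

    int main(void)
    {
            struct topo t;
            parse_topology(&t, 3, 1);
            printf("cpu3: thread %d core %d\n", t.thread_id, t.core_id);
            return 0;
    }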
/arch/sh/include/asm/
D | mmu_context.h
    37  #define asid_cache(cpu)      (cpu_data[cpu].asid_cache)
    40  #define cpu_context(cpu, mm) ((mm)->context.id[cpu])
    42  #define cpu_asid(cpu, mm) \
    43          (cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)
    55  static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
    57          unsigned long asid = asid_cache(cpu);
    60          if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
    80          cpu_context(cpu, mm) = asid_cache(cpu) = asid;
   103  static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
   105          get_mmu_context(mm, cpu);
        [all …]
/arch/riscv/kernel/
D | cpu-hotplug.c
    19  bool cpu_has_hotplug(unsigned int cpu)
    21          if (cpu_ops[cpu]->cpu_stop)
    33          unsigned int cpu = smp_processor_id();
    35          if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_stop)
    38          if (cpu_ops[cpu]->cpu_disable)
    39                  ret = cpu_ops[cpu]->cpu_disable(cpu);
    44          remove_cpu_topology(cpu);
    45          numa_remove_cpu(cpu);
    46          set_cpu_online(cpu, false);
    58  void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
        [all …]
/arch/x86/kernel/
D | setup_percpu.c
    68          unsigned int cpu;
    70          for_each_possible_cpu(cpu) {
    71                  int node = early_cpu_to_node(cpu);
    96  static int __init pcpu_cpu_to_node(int cpu)
    98          return early_cpu_to_node(cpu);
   106  static inline void setup_percpu_segment(int cpu)
   109          struct desc_struct d = GDT_ENTRY_INIT(0x8092, per_cpu_offset(cpu),
   112          write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
   118          unsigned int cpu;
   169          for_each_possible_cpu(cpu) {
        [all …]
/arch/arm/kernel/
D | smp.c
    90  static void ipi_setup(int cpu);
   112  static int secondary_biglittle_prepare(unsigned int cpu)
   114          if (!cpu_vtable[cpu])
   115                  cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
   117          return cpu_vtable[cpu] ? 0 : -ENOMEM;
   125  static int secondary_biglittle_prepare(unsigned int cpu)
   135  int __cpu_up(unsigned int cpu, struct task_struct *idle)
   142          ret = secondary_biglittle_prepare(cpu);
   165          ret = smp_ops.smp_boot_secondary(cpu, idle);
   174          if (!cpu_online(cpu)) {
        [all …]
/arch/arm/common/
D | mcpm_entry.c
    34  static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
    36          mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
    37          sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
    47  static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
    50          mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
    51          sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
    82  static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
   107                  if (i == cpu)
   111                  cpustate = c->cpus[i].cpu;
   116                  sync_cache_r(&c->cpus[i].cpu);
        [all …]
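The MCPM excerpt implements last-man selection: every CPU publishes UP, GOING_DOWN, or DOWN in shared memory, and the CPU attempting cluster power-down may proceed only once all of its peers read as DOWN. A toy model of just that check; the real code also performs cache maintenance and handles inbound-CPU races:

    /* Toy model of the MCPM last-man check; states live in plain memory
     * here rather than in explicitly synchronized cache lines. */
    #include <stdio.h>

    enum cpu_state { CPU_UP, CPU_GOING_DOWN, CPU_DOWN };

    #define CPUS_PER_CLUSTER 4

    static enum cpu_state cluster_cpu[CPUS_PER_CLUSTER];

    static int outbound_enter_critical(unsigned int self)
    {
            for (unsigned int i = 0; i < CPUS_PER_CLUSTER; i++) {
                    if (i == self)
                            continue;       /* our own state doesn't block us */
                    if (cluster_cpu[i] != CPU_DOWN)
                            return 0;       /* a peer is still up or in flight */
            }
            return 1;                       /* last man: safe to kill the cluster */
    }

    int main(void)
    {
            for (unsigned int i = 1; i < CPUS_PER_CLUSTER; i++)
                    cluster_cpu[i] = CPU_DOWN;
            cluster_cpu[0] = CPU_GOING_DOWN;
            printf("cluster off allowed: %d\n", outbound_enter_critical(0));
            return 0;
    }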