/arch/arm/boot/dts/
axm5516-cpus.dtsi:
    cpu-map {
        cpu = <&CPU0>;
        cpu = <&CPU1>;
        cpu = <&CPU2>;
        cpu = <&CPU3>;
        cpu = <&CPU4>;
        cpu = <&CPU5>;
        cpu = <&CPU6>;
        cpu = <&CPU7>;
        cpu = <&CPU8>;
    [all …]
/arch/arm/mach-tegra/
platsmp.c:
    static void tegra_secondary_init(unsigned int cpu)
    {
        cpumask_set_cpu(cpu, &tegra_cpu_init_mask);
    }

    static int tegra20_boot_secondary(unsigned int cpu, struct task_struct *idle)
    {
        cpu = cpu_logical_map(cpu);
        …
        tegra_put_cpu_in_reset(cpu);
        …
        flowctrl_write_cpu_halt(cpu, 0);
        …
        tegra_enable_cpu_clock(cpu);
        flowctrl_write_cpu_csr(cpu, 0);  /* Clear flow controller CSR. */
        tegra_cpu_out_of_reset(cpu);
        …
    }

    static int tegra30_boot_secondary(unsigned int cpu, struct task_struct *idle)
    [all …]
/arch/microblaze/kernel/cpu/
cpuinfo-static.c:
    void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
    {
        …
            (fcpu(cpu, "xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) |
            (fcpu(cpu, "xlnx,use-msr-instr") ? PVR2_USE_MSR_INSTR : 0) |
            (fcpu(cpu, "xlnx,use-pcmp-instr") ? PVR2_USE_PCMP_INSTR : 0) |
            (fcpu(cpu, "xlnx,use-div") ? PVR0_USE_DIV_MASK : 0);
        …
        ci->use_mult = fcpu(cpu, "xlnx,use-hw-mul");
        …
        ci->use_fpu = fcpu(cpu, "xlnx,use-fpu");
        …
        (fcpu(cpu, "xlnx,unaligned-exceptions") ? …
        (fcpu(cpu, "xlnx,ill-opcode-exception") ? …
        (fcpu(cpu, "xlnx,iopb-bus-exception") ? …
    [all …]
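The pattern here is assembling a PVR-style feature mask from boolean configuration flags, one ternary per flag. A minimal standalone sketch of the same idea; has_feature() stands in for the driver's fcpu() device-tree lookup, and the bit values are invented:

    #include <stdio.h>

    /* Invented bit positions, for illustration only. */
    #define USE_BARREL_MASK (1u << 0)
    #define USE_MSR_INSTR   (1u << 1)
    #define USE_DIV_MASK    (1u << 2)

    /* Stand-in for a device-tree property lookup. */
    static int has_feature(const char *name)
    {
        /* pretend the configuration enables the barrel shifter and divider */
        return name[0] == 'b' || name[0] == 'd';
    }

    int main(void)
    {
        unsigned int pvr =
            (has_feature("barrel") ? USE_BARREL_MASK : 0) |
            (has_feature("msr")    ? USE_MSR_INSTR   : 0) |
            (has_feature("div")    ? USE_DIV_MASK    : 0);

        printf("feature mask: 0x%x\n", pvr);  /* prints 0x5 */
        return 0;
    }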
/arch/powerpc/include/asm/
smp.h:
    extern int cpu_to_chip_id(int cpu);
    …
    void (*message_pass)(int cpu, int msg);
    void (*cause_ipi)(int cpu);
    int (*cause_nmi_ipi)(int cpu);
    …
    extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
    extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
    …
    void generic_cpu_die(unsigned int cpu);
    void generic_set_cpu_dead(unsigned int cpu);
    void generic_set_cpu_up(unsigned int cpu);
    int generic_check_cpu_restart(unsigned int cpu);
    [all …]
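The function-pointer members shown above belong to powerpc's per-platform SMP ops structure: generic code calls through the table, and each platform fills in its own IPI delivery. A compilable userspace model of that callback-table pattern (the struct and function names here are invented for the sketch, not the kernel's):

    #include <stdio.h>

    /* Simplified stand-in for a per-platform SMP ops table. */
    struct smp_ops_model {
        void (*message_pass)(int cpu, int msg); /* full message delivery */
        void (*cause_ipi)(int cpu);             /* just ring the doorbell */
    };

    static void demo_message_pass(int cpu, int msg)
    {
        printf("deliver msg %d to CPU %d\n", msg, cpu);
    }

    static void demo_cause_ipi(int cpu)
    {
        printf("IPI doorbell -> CPU %d\n", cpu);
    }

    /* The "platform" provides its implementations once, at init time. */
    static struct smp_ops_model demo_ops = {
        .message_pass = demo_message_pass,
        .cause_ipi    = demo_cause_ipi,
    };

    int main(void)
    {
        /* Generic code only ever calls through the table. */
        demo_ops.message_pass(1, 42);
        demo_ops.cause_ipi(2);
        return 0;
    }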
cputhreads.h:
    /* in cpu_thread_mask_to_cores(): */
    int i, cpu;
    …
    cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
    if (cpu < nr_cpu_ids)
        cpumask_set_cpu(cpu, &res);
    …
    int cpu_core_index_of_thread(int cpu);
    …
    static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
    …
    static inline int cpu_thread_in_core(int cpu)
    {
        return cpu & (threads_per_core - 1);
    }

    static inline int cpu_thread_in_subcore(int cpu)
    {
        return cpu & (threads_per_subcore - 1);
    }
    [all …]
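cpu_thread_in_core() masks with threads_per_core - 1, which only works because threads_per_core is a power of two, so the AND is equivalent to a modulo. A small self-contained demo of the same arithmetic (the SMT width of 8 is just an example value):

    #include <stdio.h>

    /* Must be a power of two for the mask trick; 8 is illustrative. */
    static int threads_per_core = 8;

    static int thread_in_core(int cpu)
    {
        return cpu & (threads_per_core - 1); /* same as cpu % threads_per_core */
    }

    static int core_of(int cpu)
    {
        return cpu / threads_per_core;
    }

    int main(void)
    {
        for (int cpu = 0; cpu < 16; cpu++)
            printf("cpu %2d -> core %d, thread %d\n",
                   cpu, core_of(cpu), thread_in_core(cpu));
        return 0;
    }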
topology.h:
    static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
    {
        numa_cpu_lookup_table[cpu] = node;
    }

    static inline int early_cpu_to_node(int cpu)
    {
        …
        nid = numa_cpu_lookup_table[cpu];
        …
    }
    …
    extern void map_cpu_to_node(int cpu, int node);
    …
    extern void unmap_cpu_from_node(unsigned long cpu);
    …
    /* stubs for the non-NUMA case: */
    static inline int early_cpu_to_node(int cpu) { return 0; }
    …
    static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
    …
    static inline void map_cpu_to_node(int cpu, int node) {}
    static inline void unmap_cpu_from_node(unsigned long cpu) {}
    [all …]
/arch/powerpc/kernel/
tau_6xx.c:
    static void set_thresholds(unsigned long cpu)
    {
        …
        mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID);
        …
        mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie);
    }

    static void TAUupdate(int cpu)
    {
        …
        if (tau[cpu].low >= step_size) {
            tau[cpu].low -= step_size;
            tau[cpu].high -= (step_size - window_expand);
            …
            tau[cpu].grew = 1;
        …
        if (tau[cpu].high <= 127 - step_size) {
            tau[cpu].low += (step_size - window_expand);
    [all …]
smp.c:
    void smp_muxed_ipi_set_message(int cpu, int msg)
    {
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);
        …
    }

    void smp_muxed_ipi_message_pass(int cpu, int msg)
    {
        smp_muxed_ipi_set_message(cpu, msg);
        …
        smp_ops->cause_ipi(cpu);
    }
    …
    static inline void do_message_pass(int cpu, int msg)
    {
        …
        smp_ops->message_pass(cpu, msg);
        …
        smp_muxed_ipi_message_pass(cpu, msg);
    }

    void smp_send_reschedule(int cpu)
    {
        …
        do_message_pass(cpu, PPC_MSG_RESCHEDULE);
    [all …]
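The multiplexed-IPI scheme above records the message type per CPU first and then fires a single doorbell interrupt; the receiver atomically swaps the message word to zero and handles every set bit, so one IPI can carry several messages. A compilable toy model (the bit assignments and the 4-CPU array size are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdio.h>

    #define MSG_RESCHEDULE 1  /* bit indices are invented for the sketch */
    #define MSG_CALL_FUNC  2

    /* One message word per CPU; senders OR a bit in, the receiver
     * atomically exchanges the whole word for zero. */
    static atomic_uint ipi_message[4];

    static void muxed_ipi_set_message(int cpu, int msg)
    {
        atomic_fetch_or(&ipi_message[cpu], 1u << msg);
        /* the real code now fires one hardware IPI at 'cpu' */
    }

    static void ipi_demux(int cpu)
    {
        unsigned int all = atomic_exchange(&ipi_message[cpu], 0);

        if (all & (1u << MSG_RESCHEDULE))
            printf("CPU %d: reschedule\n", cpu);
        if (all & (1u << MSG_CALL_FUNC))
            printf("CPU %d: call function\n", cpu);
    }

    int main(void)
    {
        muxed_ipi_set_message(0, MSG_RESCHEDULE);
        muxed_ipi_set_message(0, MSG_CALL_FUNC);
        ipi_demux(0); /* one "IPI" delivers both messages */
        return 0;
    }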
watchdog.c:
    /* in wd_lockup_ipi(): */
    int cpu = raw_smp_processor_id();
    …
    pr_emerg("CPU %d Hard LOCKUP\n", cpu);
    …
        cpu, tb, per_cpu(wd_timer_tb, cpu),
        tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
    …
    static void set_cpu_stuck(int cpu, u64 tb)
    {
        set_cpumask_stuck(cpumask_of(cpu), tb);
    }

    static void watchdog_smp_panic(int cpu, u64 tb)
    {
        …
        if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending))
        …
            cpu, cpumask_pr_args(&wd_smp_cpus_pending));
        …
            cpu, tb, wd_smp_last_reset_tb,
    [all …]
/arch/x86/xen/
smp.c:
    void xen_smp_intr_free(unsigned int cpu)
    {
        kfree(per_cpu(xen_resched_irq, cpu).name);
        per_cpu(xen_resched_irq, cpu).name = NULL;
        if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
            unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
            per_cpu(xen_resched_irq, cpu).irq = -1;
        }
        kfree(per_cpu(xen_callfunc_irq, cpu).name);
        per_cpu(xen_callfunc_irq, cpu).name = NULL;
        if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
            unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
    [all …]
smp_pv.c:
    /* in cpu_bringup(): */
    int cpu;
    …
    cpu = smp_processor_id();
    smp_store_cpu_info(cpu);
    cpu_data(cpu).x86_max_cores = 1;
    set_cpu_sibling_map(cpu);
    …
    notify_cpu_starting(cpu);
    …
    set_cpu_online(cpu, true);
    …
    cpu_set_state_online(cpu);  /* Implies full memory barrier. */
    …
    void xen_smp_intr_free_pv(unsigned int cpu)
    {
        kfree(per_cpu(xen_irq_work, cpu).name);
    [all …]
/arch/s390/include/asm/
topology.h:
    struct cpu;
    …
    #define topology_physical_package_id(cpu)  (cpu_topology[cpu].socket_id)
    #define topology_thread_id(cpu)            (cpu_topology[cpu].thread_id)
    #define topology_sibling_cpumask(cpu)      (&cpu_topology[cpu].thread_mask)
    #define topology_core_id(cpu)              (cpu_topology[cpu].core_id)
    #define topology_core_cpumask(cpu)         (&cpu_topology[cpu].core_mask)
    #define topology_book_id(cpu)              (cpu_topology[cpu].book_id)
    #define topology_book_cpumask(cpu)         (&cpu_topology[cpu].book_mask)
    #define topology_drawer_id(cpu)            (cpu_topology[cpu].drawer_id)
    #define topology_drawer_cpumask(cpu)       (&cpu_topology[cpu].drawer_mask)
    [all …]
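Each macro is just an index into a per-CPU topology table; s390 is notable for adding book and drawer levels above the socket. A simplified stand-in showing the lookup pattern (the struct layout and sample values are invented for the sketch):

    #include <stdio.h>

    /* Toy per-CPU topology records; field set mirrors the macros above. */
    struct cpu_topology_model {
        int thread_id;
        int core_id;
        int socket_id;
        int book_id;   /* s390 groups sockets into books... */
        int drawer_id; /* ...and books into drawers */
    };

    static struct cpu_topology_model cpu_topology[] = {
        { 0, 0, 0, 0, 0 },
        { 1, 0, 0, 0, 0 }, /* second thread of core 0 */
        { 0, 1, 0, 0, 0 }, /* first thread of core 1 */
    };

    #define topology_core_id(cpu)   (cpu_topology[cpu].core_id)
    #define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)

    int main(void)
    {
        for (int cpu = 0; cpu < 3; cpu++)
            printf("cpu %d: core %d thread %d\n",
                   cpu, topology_core_id(cpu), topology_thread_id(cpu));
        return 0;
    }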
/arch/arm/mach-meson/
platsmp.c:
    static struct reset_control *meson_smp_get_core_reset(int cpu)
    {
        struct device_node *np = of_get_cpu_node(cpu, 0);
        …
    }

    static void meson_smp_set_cpu_ctrl(int cpu, bool on_off)
    {
        …
        if (on_off)
            val |= BIT(cpu);
        else
            val &= ~BIT(cpu);
        …
    }
    …
    static void meson_smp_begin_secondary_boot(unsigned int cpu)
    {
        …
            sram_base + MESON_SMP_SRAM_CPU_CTRL_ADDR_REG(cpu));
        …
        scu_cpu_power_enable(scu_base, cpu);
    }

    static int meson_smp_finalize_secondary_boot(unsigned int cpu)
    {
        …
        while (readl(sram_base + MESON_SMP_SRAM_CPU_CTRL_ADDR_REG(cpu))) {
    [all …]
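meson_smp_set_cpu_ctrl() is a read-modify-write of one bit per CPU in a control register. The same logic modeled on a plain variable instead of a readl()/writel() pair on an ioremapped register:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the SRAM CPU control register. */
    static uint32_t cpu_ctrl_reg;

    static void set_cpu_ctrl(int cpu, int on)
    {
        uint32_t val = cpu_ctrl_reg;   /* read */

        if (on)
            val |= 1u << cpu;          /* allow this core to run */
        else
            val &= ~(1u << cpu);       /* hold it */

        cpu_ctrl_reg = val;            /* write back */
    }

    int main(void)
    {
        set_cpu_ctrl(1, 1);
        set_cpu_ctrl(2, 1);
        set_cpu_ctrl(1, 0);
        printf("ctrl = 0x%x\n", (unsigned)cpu_ctrl_reg); /* 0x4 */
        return 0;
    }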
/arch/arm/mach-bcm/
platsmp-brcmstb.c:
    static int per_cpu_sw_state_rd(u32 cpu)
    {
        sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
        return per_cpu(per_cpu_sw_state, cpu);
    }

    static void per_cpu_sw_state_wr(u32 cpu, int val)
    {
        …
        per_cpu(per_cpu_sw_state, cpu) = val;
        sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
    }
    …
    static inline void per_cpu_sw_state_wr(u32 cpu, int val) { }
    …
    static void __iomem *pwr_ctrl_get_base(u32 cpu)
    {
        …
        base += (cpu_logical_map(cpu) * 4);
        …
    }

    static u32 pwr_ctrl_rd(u32 cpu)
    [all …]
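The helpers pair every access to per_cpu_sw_state with explicit cache maintenance, because the other party (a CPU being powered up or down) may run with its caches off and only sees main memory. A userspace model of the access discipline, with empty stubs standing in for sync_cache_r()/sync_cache_w():

    #include <stdio.h>

    /* Model only: the stubs below mark where the kernel invalidates
     * (before a read) or cleans (after a write) the cache line. */
    static int per_cpu_sw_state[4];

    static void sync_cache_for_read(void *p)  { (void)p; /* invalidate */ }
    static void sync_cache_for_write(void *p) { (void)p; /* clean */ }

    static int sw_state_rd(int cpu)
    {
        sync_cache_for_read(&per_cpu_sw_state[cpu]);  /* see the other side's write */
        return per_cpu_sw_state[cpu];
    }

    static void sw_state_wr(int cpu, int val)
    {
        per_cpu_sw_state[cpu] = val;
        sync_cache_for_write(&per_cpu_sw_state[cpu]); /* push it to memory */
    }

    int main(void)
    {
        sw_state_wr(1, 1);
        printf("cpu1 state: %d\n", sw_state_rd(1));
        return 0;
    }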
/arch/x86/include/asm/
topology.h:
    extern int __cpu_to_node(int cpu);
    …
    extern int early_cpu_to_node(int cpu);
    …
    static inline int early_cpu_to_node(int cpu)
    {
        return early_per_cpu(x86_cpu_to_node_map, cpu);
    }
    …
    static inline int early_cpu_to_node(int cpu)
    …
    extern const struct cpumask *cpu_coregroup_mask(int cpu);
    …
    #define topology_logical_package_id(cpu)   (cpu_data(cpu).logical_proc_id)
    #define topology_physical_package_id(cpu)  (cpu_data(cpu).phys_proc_id)
    #define topology_logical_die_id(cpu)       (cpu_data(cpu).logical_die_id)
    #define topology_die_id(cpu)               (cpu_data(cpu).cpu_die_id)
    [all …]
/arch/arc/kernel/
setup.c:
    static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
    {
        …
        cpu->iccm.sz = 4096 << iccm.sz;  /* 8K to 512K */
        cpu->iccm.base_addr = iccm.base << 16;
        …
        cpu->dccm.sz = 2048 << dccm.sz;  /* 2K to 256K */
        …
        cpu->dccm.base_addr = base & ~0xF;
        …
        cpu->iccm.sz = 256 << iccm.sz00; /* 512B to 16M */
        …
        cpu->iccm.sz <<= iccm.sz01;
        …
        cpu->iccm.base_addr = region & 0xF0000000;
        …
        cpu->dccm.sz = 256 << dccm.sz0;
        …
        cpu->dccm.sz <<= dccm.sz1;
    [all …]
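The closely-coupled-memory sizes are stored as small exponent fields in the build-configuration registers, so decoding is just a shift of a fixed base size. A short demo of the older-core encodings quoted above (the size ranges come from the fragment's own comments; valid field values are assumed, not checked):

    #include <stdio.h>

    /* ICCM: base 4 KiB shifted by the BCR field (8K..512K per the comment). */
    static unsigned long iccm_size(unsigned int sz_field)
    {
        return 4096UL << sz_field;
    }

    /* DCCM: base 2 KiB shifted by the BCR field (2K..256K per the comment). */
    static unsigned long dccm_size(unsigned int sz_field)
    {
        return 2048UL << sz_field;
    }

    int main(void)
    {
        for (unsigned int f = 1; f <= 7; f++)
            printf("sz=%u -> iccm %luK, dccm %luK\n",
                   f, iccm_size(f) >> 10, dccm_size(f) >> 10);
        return 0;
    }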
/arch/riscv/kernel/
cpu-hotplug.c:
    bool cpu_has_hotplug(unsigned int cpu)
    {
        if (cpu_ops[cpu]->cpu_stop)
        …
    }
    …
    /* in __cpu_disable(): */
    unsigned int cpu = smp_processor_id();
    …
    if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_stop)
    …
    if (cpu_ops[cpu]->cpu_disable)
        ret = cpu_ops[cpu]->cpu_disable(cpu);
    …
    remove_cpu_topology(cpu);
    numa_remove_cpu(cpu);
    set_cpu_online(cpu, false);
    …
    void __cpu_die(unsigned int cpu)
    [all …]
/arch/xtensa/include/asm/
mmu_context.h:
    #define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
    …
    static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
    {
        unsigned long asid = cpu_asid_cache(cpu);
        …
        cpu_asid_cache(cpu) = asid;
        mm->context.asid[cpu] = asid;
        mm->context.cpu = cpu;
    }

    static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
    {
        …
        unsigned long asid = mm->context.asid[cpu];
        …
            ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
            get_new_mmu_context(mm, cpu);
    [all …]
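This is the classic ASID-with-generation scheme: the low bits of the per-CPU counter are the hardware ASID, the high bits are a generation ("version") number, and a mismatch in the high bits, as tested by the XOR above, forces a fresh allocation. A toy allocator showing the mechanism with invented constants (not xtensa's exact widths or reserved values):

    #include <stdio.h>

    #define ASID_BITS  8
    #define ASID_MASK  ((1u << ASID_BITS) - 1)
    #define ASID_FIRST 4u  /* pretend ASIDs 0..3 are reserved */

    /* Per-CPU counter: generation in the high bits, ASID in the low bits. */
    static unsigned long asid_cache = (1u << ASID_BITS) + ASID_FIRST;

    static unsigned long new_context(void)
    {
        unsigned long asid = ++asid_cache;

        if ((asid & ASID_MASK) < ASID_FIRST) {
            /* ASID field wrapped: the ++ already bumped the generation,
             * just skip past the reserved ASIDs (a real kernel would
             * also flush the TLB here). */
            asid = asid_cache = (asid & ~(unsigned long)ASID_MASK) + ASID_FIRST;
        }
        return asid;
    }

    /* Stale when the generation bits no longer match the CPU's counter. */
    static int context_is_stale(unsigned long mm_asid)
    {
        return ((mm_asid ^ asid_cache) & ~(unsigned long)ASID_MASK) != 0;
    }

    int main(void)
    {
        unsigned long a = new_context();
        printf("asid %lu, generation %lu, stale=%d\n",
               a & ASID_MASK, a >> ASID_BITS, context_is_stale(a));
        return 0;
    }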
/arch/ia64/kernel/
err_inject.c:
    /* in the sysfs show/store macro templates: */
        u32 cpu=dev->id;                                \
        return sprintf(buf, "%llx\n", name[cpu]);       \
    …
        unsigned int cpu=dev->id;                       \
        name[cpu] = simple_strtoull(buf, NULL, 16);     \
    …
    /* in show(): */
    unsigned int cpu=dev->id;
    …
    printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
    printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]);
    printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]);
    …
        err_data_buffer[cpu].data1,
        err_data_buffer[cpu].data2,
    [all …]
/arch/arm64/kernel/
topology.c:
    static bool __init acpi_cpu_is_threaded(int cpu)
    {
        int is_threaded = acpi_pptt_cpu_is_thread(cpu);
        …
    }
    …
    /* in parse_acpi_topology(): */
    int cpu, topology_id;
    …
    for_each_possible_cpu(cpu) {
        …
        topology_id = find_acpi_cpu_topology(cpu, 0);
        …
        if (acpi_cpu_is_threaded(cpu)) {
            cpu_topology[cpu].thread_id = topology_id;
            topology_id = find_acpi_cpu_topology(cpu, 1);
            cpu_topology[cpu].core_id = topology_id;
        } else {
            cpu_topology[cpu].thread_id = -1;
    [all …]
smp.c:
    static void ipi_setup(int cpu);
    …
    static void ipi_teardown(int cpu);
    static int op_cpu_kill(unsigned int cpu);
    …
    static inline int op_cpu_kill(unsigned int cpu)
    …
    static int boot_secondary(unsigned int cpu, struct task_struct *idle)
    {
        const struct cpu_operations *ops = get_cpu_ops(cpu);
        …
        return ops->cpu_boot(cpu);
    }

    int __cpu_up(unsigned int cpu, struct task_struct *idle)
    {
        …
        ret = boot_secondary(cpu, idle);
        …
            pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
    [all …]
/arch/sh/include/asm/
mmu_context.h:
    #define asid_cache(cpu)      (cpu_data[cpu].asid_cache)
    …
    #define cpu_context(cpu, mm) ((mm)->context.id[cpu])

    #define cpu_asid(cpu, mm) \
        (cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)
    …
    static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
    {
        unsigned long asid = asid_cache(cpu);
        …
        if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
        …
        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
    }
    …
    static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
    {
        get_mmu_context(mm, cpu);
    [all …]
/arch/x86/kernel/
setup_percpu.c:
    /* in pcpu_need_numa(): */
    unsigned int cpu;
    …
    for_each_possible_cpu(cpu) {
        int node = early_cpu_to_node(cpu);
    …
    static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
                                            unsigned long align)
    {
        …
        int node = early_cpu_to_node(cpu);
        …
            cpu, node);
        …
            cpu, size, __pa(ptr));
        …
            cpu, size, node, __pa(ptr));
    }
    …
    static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
    {
        return pcpu_alloc_bootmem(cpu, size, align);
    [all …]
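pcpu_alloc_bootmem() prefers a node-local allocation for a CPU's per-CPU area and falls back to "any node" when the CPU's node is unknown or the local attempt fails. A sketch of just that fallback logic; alloc_on_node() is a stand-in for the kernel's early memblock allocator, not a real API:

    #include <stdio.h>
    #include <stdlib.h>

    #define NUMA_NO_NODE (-1)

    /* Stand-in allocator; a real one would honor the node hint. */
    static void *alloc_on_node(size_t size, int node)
    {
        (void)node;
        return malloc(size);
    }

    static void *pcpu_alloc(unsigned int cpu, size_t size, int node)
    {
        void *p;

        if (node == NUMA_NO_NODE) {
            printf("cpu %u has no node, allocating anywhere\n", cpu);
            return alloc_on_node(size, NUMA_NO_NODE);
        }

        p = alloc_on_node(size, node);          /* try node-local first */
        if (!p) {
            printf("cpu %u: node %d failed, falling back\n", cpu, node);
            p = alloc_on_node(size, NUMA_NO_NODE);
        }
        return p;
    }

    int main(void)
    {
        void *p = pcpu_alloc(3, 4096, NUMA_NO_NODE);
        free(p);
        return 0;
    }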
/arch/arm/kernel/
smp.c:
    static void ipi_setup(int cpu);
    …
    static int secondary_biglittle_prepare(unsigned int cpu)
    {
        if (!cpu_vtable[cpu])
            cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);

        return cpu_vtable[cpu] ? 0 : -ENOMEM;
    }
    …
    static int secondary_biglittle_prepare(unsigned int cpu)
    …
    int __cpu_up(unsigned int cpu, struct task_struct *idle)
    {
        …
        ret = secondary_biglittle_prepare(cpu);
        …
        ret = smp_ops.smp_boot_secondary(cpu, idle);
        …
        if (!cpu_online(cpu)) {
    [all …]
/arch/arm/common/
mcpm_entry.c:
    static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
    {
        mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
        sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
    }
    …
    static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
    {
        …
        mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
        sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
    }
    …
    static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
    {
        …
        if (i == cpu)
            …
        cpustate = c->cpus[i].cpu;
        …
        sync_cache_r(&c->cpus[i].cpu);
    [all …]
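__mcpm_outbound_enter_critical() implements the "last man standing" check: before the outbound CPU may power down the whole cluster, it verifies that every other CPU in that cluster has reached CPU_DOWN, skipping its own slot. A simplified model of the check (the retry handling for CPUs still in CPU_GOING_DOWN, and the cache maintenance, are omitted; the 4-CPU cluster is illustrative):

    #include <stdio.h>

    enum cpu_state { CPU_DOWN, CPU_COMING_UP, CPU_UP, CPU_GOING_DOWN };

    static enum cpu_state cluster_cpus[4] = {
        CPU_DOWN, CPU_GOING_DOWN, CPU_DOWN, CPU_DOWN,
    };

    static int outbound_enter_critical(unsigned int self)
    {
        for (unsigned int i = 0; i < 4; i++) {
            if (i == self)
                continue;               /* our own state doesn't count */
            if (cluster_cpus[i] != CPU_DOWN)
                return 0;               /* someone else still alive: bail */
        }
        return 1;                       /* last man standing */
    }

    int main(void)
    {
        /* CPU 0 cannot take the cluster down: CPU 1 is still going down. */
        printf("cpu0 last man? %d\n", outbound_enter_critical(0));
        cluster_cpus[1] = CPU_DOWN;
        printf("cpu0 last man? %d\n", outbound_enter_critical(0));
        return 0;
    }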