Lines matching the full-text search term "cpu"

23 #include <linux/cpu.h>
40 #include <asm/cpu.h>
84 static void ipi_setup(int cpu);
87 static void ipi_teardown(int cpu);
88 static int op_cpu_kill(unsigned int cpu);
90 static inline int op_cpu_kill(unsigned int cpu) in op_cpu_kill() argument
98 * Boot a secondary CPU, and assign it the specified idle task.
99 * This also gives us the initial stack to use for this CPU.
101 static int boot_secondary(unsigned int cpu, struct task_struct *idle) in boot_secondary() argument
103 const struct cpu_operations *ops = get_cpu_ops(cpu); in boot_secondary()
106 return ops->cpu_boot(cpu); in boot_secondary()
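The boot_secondary() fragment above dispatches through a per-CPU cpu_operations table looked up with get_cpu_ops(). As a minimal, self-contained sketch of that function-pointer pattern (the struct layout and helper name below are illustrative assumptions, not the kernel's actual definitions):

    #include <errno.h>
    #include <stddef.h>

    /* Illustrative stand-in for the kernel's per-CPU cpu_operations table. */
    struct cpu_operations {
            const char *name;
            int (*cpu_boot)(unsigned int cpu);   /* release the CPU from its holding pen */
            void (*cpu_die)(unsigned int cpu);   /* power the CPU down from its own idle thread */
            int (*cpu_kill)(unsigned int cpu);   /* confirm from another CPU that it is really gone */
    };

    /* Mirrors the shape of boot_secondary(): look up the ops and call the cpu_boot hook. */
    static int boot_secondary_sketch(unsigned int cpu, const struct cpu_operations *ops)
    {
            if (ops == NULL || ops->cpu_boot == NULL)
                    return -EOPNOTSUPP;          /* no usable enable-method for this CPU */
            return ops->cpu_boot(cpu);
    }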
113 int __cpu_up(unsigned int cpu, struct task_struct *idle) in __cpu_up() argument
127 /* Now bring the CPU into our world */ in __cpu_up()
128 ret = boot_secondary(cpu, idle); in __cpu_up()
130 pr_err("CPU%u: failed to boot: %d\n", cpu, ret); in __cpu_up()
135 * CPU was successfully started, wait for it to come online or in __cpu_up()
140 if (cpu_online(cpu)) in __cpu_up()
143 pr_crit("CPU%u: failed to come online\n", cpu); in __cpu_up()
153 pr_err("CPU%u: failed in unknown state : 0x%lx\n", in __cpu_up()
154 cpu, status); in __cpu_up()
158 if (!op_cpu_kill(cpu)) { in __cpu_up()
159 pr_crit("CPU%u: died during early boot\n", cpu); in __cpu_up()
162 pr_crit("CPU%u: may not have shut down cleanly\n", cpu); in __cpu_up()
165 pr_crit("CPU%u: is stuck in kernel\n", cpu); in __cpu_up()
167 pr_crit("CPU%u: does not support 52-bit VAs\n", cpu); in __cpu_up()
169 pr_crit("CPU%u: does not support %luK granule\n", in __cpu_up()
170 cpu, PAGE_SIZE / SZ_1K); in __cpu_up()
175 panic("CPU%u detected unsupported configuration\n", cpu); in __cpu_up()
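The error paths in the __cpu_up() fragments above decode a status word left behind by a secondary CPU that failed to come online. A hedged sketch of that decode step follows; the enum values and helper name are assumptions for illustration, not the kernel's real encoding:

    #include <stdio.h>

    /* Assumed, simplified stand-ins for the boot-status conditions tested above. */
    enum secondary_boot_status {
            BOOT_OK,
            BOOT_DIED_EARLY,          /* CPU died before completing early boot */
            BOOT_STUCK_IN_KERNEL,     /* CPU is spinning in the kernel and cannot be reclaimed */
            BOOT_NO_52BIT_VA,         /* CPU lacks the 52-bit VA support this kernel needs */
            BOOT_WRONG_GRANULE,       /* CPU does not support the configured page-size granule */
    };

    /* Mirrors the pr_crit() reporting seen in the __cpu_up() fragments. */
    static void report_boot_failure(unsigned int cpu, enum secondary_boot_status status,
                                    unsigned long page_kb)
    {
            switch (status) {
            case BOOT_DIED_EARLY:
                    printf("CPU%u: died during early boot\n", cpu);
                    break;
            case BOOT_STUCK_IN_KERNEL:
                    printf("CPU%u: is stuck in kernel\n", cpu);
                    break;
            case BOOT_NO_52BIT_VA:
                    printf("CPU%u: does not support 52-bit VAs\n", cpu);
                    break;
            case BOOT_WRONG_GRANULE:
                    printf("CPU%u: does not support %luK granule\n", cpu, page_kb);
                    break;
            default:
                    printf("CPU%u: failed in unknown state\n", cpu);
                    break;
            }
    }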
196 * This is the secondary CPU boot entry. We're using this CPUs
204 unsigned int cpu; in secondary_start_kernel() local
206 cpu = task_cpu(current); in secondary_start_kernel()
207 set_my_cpu_offset(per_cpu_offset(cpu)); in secondary_start_kernel()
225 rcu_cpu_starting(cpu); in secondary_start_kernel()
230 * this CPU ticks all of those. If it doesn't, the CPU will in secondary_start_kernel()
235 ops = get_cpu_ops(cpu); in secondary_start_kernel()
240 * Log the CPU info before it is marked online and might get read. in secondary_start_kernel()
247 notify_cpu_starting(cpu); in secondary_start_kernel()
249 ipi_setup(cpu); in secondary_start_kernel()
251 store_cpu_topology(cpu); in secondary_start_kernel()
252 numa_add_cpu(cpu); in secondary_start_kernel()
255 * OK, now it's safe to let the boot CPU continue. Wait for in secondary_start_kernel()
256 * the CPU migration code to notice that the CPU is online in secondary_start_kernel()
259 pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n", in secondary_start_kernel()
260 cpu, (unsigned long)mpidr, in secondary_start_kernel()
263 set_cpu_online(cpu, true); in secondary_start_kernel()
275 static int op_cpu_disable(unsigned int cpu) in op_cpu_disable() argument
277 const struct cpu_operations *ops = get_cpu_ops(cpu); in op_cpu_disable()
291 return ops->cpu_disable(cpu); in op_cpu_disable()
301 unsigned int cpu = smp_processor_id(); in __cpu_disable() local
304 ret = op_cpu_disable(cpu); in __cpu_disable()
308 remove_cpu_topology(cpu); in __cpu_disable()
309 numa_remove_cpu(cpu); in __cpu_disable()
312 * Take this CPU offline. Once we clear this, we can't return, in __cpu_disable()
313 * and we must not schedule until we're ready to give up the cpu. in __cpu_disable()
315 set_cpu_online(cpu, false); in __cpu_disable()
316 ipi_teardown(cpu); in __cpu_disable()
319 * OK - migrate IRQs away from this CPU in __cpu_disable()
326 static int op_cpu_kill(unsigned int cpu) in op_cpu_kill() argument
328 const struct cpu_operations *ops = get_cpu_ops(cpu); in op_cpu_kill()
331 * If we have no means of synchronising with the dying CPU, then assume in op_cpu_kill()
338 return ops->cpu_kill(cpu); in op_cpu_kill()
342 * called on the thread which is asking for a CPU to be shutdown -
345 void __cpu_die(unsigned int cpu) in __cpu_die() argument
349 if (!cpu_wait_death(cpu, 5)) { in __cpu_die()
350 pr_crit("CPU%u: cpu didn't die\n", cpu); in __cpu_die()
353 pr_notice("CPU%u: shutdown\n", cpu); in __cpu_die()
356 * Now that the dying CPU is beyond the point of no return w.r.t. in __cpu_die()
361 err = op_cpu_kill(cpu); in __cpu_die()
363 pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err); in __cpu_die()
367 * Called from the idle thread for the CPU which has been shutdown.
372 unsigned int cpu = smp_processor_id(); in cpu_die() local
373 const struct cpu_operations *ops = get_cpu_ops(cpu); in cpu_die()
379 /* Tell __cpu_die() that this CPU is now safe to dispose of */ in cpu_die()
383 * Actually shutdown the CPU. This must never fail. The specific hotplug in cpu_die()
385 * no dirty lines are lost in the process of shutting down the CPU. in cpu_die()
387 ops->cpu_die(cpu); in cpu_die()
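The __cpu_die()/cpu_die() fragments above are the two halves of a hotplug handshake: the requesting CPU waits, with a timeout, for the dying CPU to report that it is past the point of no return, and only then asks the enable-method to confirm the kill. A userspace-flavoured sketch of that handshake pattern, using C11 atomics instead of the kernel's cpu_wait_death()/completion machinery (all names here are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static atomic_bool cpu_safe_to_dispose;    /* set by the dying CPU, polled by the waiter */

    /* Dying-CPU side: report quiescence, then invoke the (never-returning) power-off hook. */
    static void cpu_die_sketch(unsigned int cpu, void (*power_off)(unsigned int))
    {
            atomic_store(&cpu_safe_to_dispose, true);  /* "safe to dispose of" signal */
            power_off(cpu);                            /* must never fail or return */
    }

    /* Requesting-CPU side: wait up to `seconds` for the signal, then report the outcome. */
    static bool wait_for_cpu_death(unsigned int cpu, unsigned int seconds)
    {
            time_t deadline = time(NULL) + seconds;

            while (!atomic_load(&cpu_safe_to_dispose)) {
                    if (time(NULL) > deadline) {
                            printf("CPU%u: cpu didn't die\n", cpu);
                            return false;
                    }
            }
            printf("CPU%u: shutdown\n", cpu);
            return true;
    }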
393 static void __cpu_try_die(int cpu) in __cpu_try_die() argument
396 const struct cpu_operations *ops = get_cpu_ops(cpu); in __cpu_try_die()
399 ops->cpu_die(cpu); in __cpu_try_die()
404 * Kill the calling secondary CPU, early in bringup before it is turned
409 int cpu = smp_processor_id(); in cpu_die_early() local
411 pr_crit("CPU%d: will not boot\n", cpu); in cpu_die_early()
413 /* Mark this CPU absent */ in cpu_die_early()
414 set_cpu_present(cpu, 0); in cpu_die_early()
415 rcu_report_dead(cpu); in cpu_die_early()
419 __cpu_try_die(cpu); in cpu_die_early()
430 pr_info("CPU: All CPU(s) started at EL2\n"); in hyp_mode_check()
433 "CPU: CPUs started in inconsistent modes"); in hyp_mode_check()
435 pr_info("CPU: All CPU(s) started at EL1\n"); in hyp_mode_check()
455 * We now know enough about the boot CPU to apply the in smp_prepare_boot_cpu()
472 * A cpu node with missing "reg" property is in of_get_cpu_mpidr()
496 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
499 static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid) in is_mpidr_duplicate() argument
503 for (i = 1; (i < cpu) && (i < NR_CPUS); i++) in is_mpidr_duplicate()
510 * Initialize cpu operations for a logical cpu and
513 static int __init smp_cpu_setup(int cpu) in smp_cpu_setup() argument
517 if (init_cpu_ops(cpu)) in smp_cpu_setup()
520 ops = get_cpu_ops(cpu); in smp_cpu_setup()
521 if (ops->cpu_init(cpu)) in smp_cpu_setup()
524 set_cpu_possible(cpu, true); in smp_cpu_setup()
535 struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu) in acpi_cpu_get_madt_gicc() argument
537 return &cpu_madt_gicc[cpu]; in acpi_cpu_get_madt_gicc()
552 pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid); in acpi_map_gic_cpu_interface()
557 pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid); in acpi_map_gic_cpu_interface()
562 pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid); in acpi_map_gic_cpu_interface()
566 /* Check if GICC structure of boot CPU is available in the MADT */ in acpi_map_gic_cpu_interface()
569 pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n", in acpi_map_gic_cpu_interface()
581 /* map the logical cpu id to cpu MPIDR */ in acpi_map_gic_cpu_interface()
587 * Set-up the ACPI parking protocol cpu entries in acpi_map_gic_cpu_interface()
592 * initialize the cpu if the parking protocol is in acpi_map_gic_cpu_interface()
630 * In ACPI, SMP and CPU NUMA information is provided in separate in acpi_parse_and_init_cpus()
633 * Thus, it is simpler to first create the cpu logical map through in acpi_parse_and_init_cpus()
647 * Enumerate the possible CPU set from the device tree and build the
648 * cpu logical map array containing MPIDR values related to logical
662 pr_err("%pOF: duplicate cpu reg properties in the DT\n", in of_parse_and_init_cpus()
668 * The numbering scheme requires that the boot CPU in of_parse_and_init_cpus()
675 pr_err("%pOF: duplicate boot cpu reg property in DT\n", in of_parse_and_init_cpus()
685 * initialized and the boot cpu doesn't need in of_parse_and_init_cpus()
687 * incrementing cpu. in of_parse_and_init_cpus()
695 pr_debug("cpu logical map 0x%llx\n", hwid); in of_parse_and_init_cpus()
705 * Enumerate the possible CPU set from the device tree or ACPI and build the
706 * cpu logical map array containing MPIDR values related to logical
723 pr_err("missing boot CPU MPIDR, not enabling secondaries\n"); in smp_init_cpus()
729 * the cpus so that cpu processor description entries (DT cpu nodes in smp_init_cpus()
730 * and ACPI MADT entries) can be retrieved by matching the cpu hwid in smp_init_cpus()
732 * If the cpu set-up fails, invalidate the cpu_logical_map entry. in smp_init_cpus()
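The enumeration fragments above build a logical-CPU-to-MPIDR map from the device tree or ACPI MADT, rejecting duplicate hardware IDs and invalidating entries whose per-CPU setup fails. A compact sketch of that map-building pattern (the array name and the INVALID_HWID sentinel value are assumptions for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    #define NR_CPUS_SKETCH  8
    #define INVALID_HWID    UINT64_MAX       /* assumed sentinel for an unassigned slot */

    static uint64_t cpu_logical_map_sketch[NR_CPUS_SKETCH];

    /* Every slot starts out invalid, matching the "initialized to INVALID_HWID" note above. */
    static void init_logical_map(void)
    {
            for (unsigned int i = 0; i < NR_CPUS_SKETCH; i++)
                    cpu_logical_map_sketch[i] = INVALID_HWID;
    }

    /* Mirrors is_mpidr_duplicate(): only slots below `cpu` can already be populated. */
    static bool is_duplicate_hwid(unsigned int cpu, uint64_t hwid)
    {
            for (unsigned int i = 1; i < cpu && i < NR_CPUS_SKETCH; i++)
                    if (cpu_logical_map_sketch[i] == hwid)
                            return true;
            return false;
    }

    /* Assign the next logical id to a discovered hardware id, or reject the duplicate. */
    static int map_cpu_hwid(unsigned int cpu, uint64_t hwid)
    {
            if (is_duplicate_hwid(cpu, hwid))
                    return -1;                       /* duplicate MPIDR in DT/MADT */
            cpu_logical_map_sketch[cpu] = hwid;      /* logical cpu -> MPIDR */
            return 0;
    }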
746 unsigned int cpu; in smp_prepare_cpus() local
768 for_each_possible_cpu(cpu) { in smp_prepare_cpus()
770 per_cpu(cpu_number, cpu) = cpu; in smp_prepare_cpus()
772 if (cpu == smp_processor_id()) in smp_prepare_cpus()
775 ops = get_cpu_ops(cpu); in smp_prepare_cpus()
779 err = ops->cpu_prepare(cpu); in smp_prepare_cpus()
783 set_cpu_present(cpu, true); in smp_prepare_cpus()
784 numa_store_cpu_info(cpu); in smp_prepare_cpus()
792 S(IPI_CPU_STOP, "CPU stop interrupts"),
793 S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
796 S(IPI_WAKEUP, "CPU wake-up interrupts"),
805 unsigned int cpu, i; in arch_show_interrupts() local
811 for_each_online_cpu(cpu) in arch_show_interrupts()
812 seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu)); in arch_show_interrupts()
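The arch_show_interrupts() fragments above emit one row per IPI type with a count column for each online CPU, the layout familiar from /proc/interrupts. A small standalone sketch of that output format (the counter storage and names are made up for illustration):

    #include <stdio.h>

    #define N_IPI   2
    #define N_CPUS  4

    static const char *ipi_names[N_IPI] = {
            "CPU stop interrupts",
            "CPU wake-up interrupts",
    };

    /* ipi_counts[i][cpu] stands in for the kernel's per-IRQ, per-CPU statistics. */
    static unsigned int ipi_counts[N_IPI][N_CPUS];

    static void show_ipi_table(void)
    {
            for (int i = 0; i < N_IPI; i++) {
                    printf("IPI%d:", i);
                    for (int cpu = 0; cpu < N_CPUS; cpu++)
                            printf("%10u ", ipi_counts[i][cpu]);  /* one column per online CPU */
                    printf("      %s\n", ipi_names[i]);
            }
    }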
825 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
827 smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC); in arch_send_call_function_single_ipi()
867 static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) in ipi_cpu_crash_stop() argument
870 crash_save_cpu(regs, cpu); in ipi_cpu_crash_stop()
878 __cpu_try_die(cpu); in ipi_cpu_crash_stop()
890 unsigned int cpu = smp_processor_id(); in do_handle_IPI() local
910 ipi_cpu_crash_stop(cpu, get_irq_regs()); in do_handle_IPI()
930 WARN_ONCE(!acpi_parking_protocol_valid(cpu), in do_handle_IPI()
931 "CPU%u: Wake-up IPI outside the ACPI parking protocol\n", in do_handle_IPI()
932 cpu); in do_handle_IPI()
937 pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); in do_handle_IPI()
957 static void ipi_setup(int cpu) in ipi_setup() argument
969 static void ipi_teardown(int cpu) in ipi_teardown() argument
1001 /* Setup the boot CPU immediately */ in set_smp_ipi_range()
1005 void smp_send_reschedule(int cpu) in smp_send_reschedule() argument
1007 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); in smp_send_reschedule()
1018 * The number of CPUs online, not counting this CPU (which may not be
1072 * If this cpu is the only one alive at this point in time, online or in crash_smp_send_stop()